author     Raghuram Subramani <raghus2247@gmail.com>   2024-06-09 10:18:00 +0530
committer  Raghuram Subramani <raghus2247@gmail.com>   2024-06-09 10:18:00 +0530
commit     93dcb1f4265407c8cbf322a0683eba4d4a5b483a (patch)
tree       1355aa8f970d54eca0e21bc6f2803eb88e06391f
parent     194f7d40561485f5ac3a3556721cfbc542be3b07 (diff)
NN
-rwxr-xr-x  neural_network.py  20
-rw-r--r--  src/loss.py         5
-rw-r--r--  src/nn.py          13
3 files changed, 34 insertions, 4 deletions
diff --git a/neural_network.py b/neural_network.py
index 62f3f9c..593a6ce 100755
--- a/neural_network.py
+++ b/neural_network.py
@@ -1,17 +1,29 @@
#!/usr/bin/env python
from src.nn import MLP
+from src.loss import mse
X = [
- [ 0.0, 0.0, 0.0 ],
- [ 1.0, 1.0, 1.0 ],
+ [ 0.0, 1.0, 2.0 ],
+ [ 2.0, 1.0, 0.0 ],
[ 2.0, 2.0, 2.0 ],
[ 3.0, 3.0, 3.0 ]
]
-y = [ 1.0, 2.0, 3.0, 4.0 ] # X + 1
-
+y = [ 1.0, -1.0, 1.0, -1.0 ]
n = MLP(3, [ 4, 4, 1 ])
pred = [ n(x) for x in X ]
print(pred)
+
+for i in range(400):
+ pred = [ n(x) for x in X ]
+ loss = mse(y, pred)
+
+ loss.zero_grad()
+ loss.backward()
+ n.optimise(0.01)
+
+ print(loss.data)
+
+print(pred)
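The loop above runs 400 iterations of full-batch gradient descent: a forward pass over all four inputs, a squared-error loss, a gradient reset, backpropagation, then a parameter update via n.optimise. A small fragment that could be appended after the final print to inspect the fit; this is an assumption, not part of the commit, and relies on each prediction being a Scalar exposing a .data float, the same way loss.data is read above:

# Hypothetical addition: compare each target with the trained network's output,
# assuming every entry of pred is a Scalar with a .data float.
for x, target, p in zip(X, y, pred):
    print(f"input={x} target={target} prediction={p.data:.3f}")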
diff --git a/src/loss.py b/src/loss.py
new file mode 100644
index 0000000..625ff0f
--- /dev/null
+++ b/src/loss.py
@@ -0,0 +1,5 @@
+from .scalar import Scalar
+
+def mse(ys, preds) -> Scalar:
+ losses = [ (y - pred) ** 2 for y, pred in zip(ys, preds) ]
+ return sum(losses)
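As written, mse returns the sum of squared errors rather than the mean; with a fixed four-sample batch that only scales the gradients by a constant factor. A sketch of an averaging variant, not part of this commit, assuming Scalar supports multiplication by a plain float (the neuron forward pass already relies on Scalar/float arithmetic); it would sit in src/loss.py next to mse, where Scalar is already imported:

# Hypothetical variant: average instead of summing the squared errors.
# Assumes Scalar * float is defined.
def mse_mean(ys, preds) -> Scalar:
    losses = [ (y - pred) ** 2 for y, pred in zip(ys, preds) ]
    return sum(losses) * (1.0 / len(losses))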
diff --git a/src/nn.py b/src/nn.py
index e5d20d4..d549091 100644
--- a/src/nn.py
+++ b/src/nn.py
@@ -17,6 +17,9 @@ class Neuron:
return result.tanh()
+ def parameters(self):
+ return self.w + [ self.b ]
+
class Layer:
def __init__(self, n_X, n_y):
self.neurons = [ Neuron(n_X) for _ in range(n_y) ]
@@ -25,6 +28,9 @@ class Layer:
result = [ n(X) for n in self.neurons ]
return result[0] if len(result) == 1 else result
+ def parameters(self):
+ return [ param for neuron in self.neurons for param in neuron.parameters() ]
+
class MLP:
def __init__(self, n_X, layers):
sz = [ n_X ] + layers
@@ -35,3 +41,10 @@ class MLP:
X = layer(X)
return X
+
+ def parameters(self):
+ return [ param for layer in self.layers for param in layer.parameters() ]
+
+ def optimise(self, lr):
+ for parameter in self.parameters():
+ parameter.data -= lr * parameter.grad
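parameters() flattens every weight and bias in the network, and optimise() applies one step of plain gradient descent, parameter.data -= lr * parameter.grad. Since each Neuron holds n_X weights plus one bias, the MLP(3, [ 4, 4, 1 ]) built in neural_network.py carries 4*(3+1) + 4*(4+1) + 1*(4+1) = 41 parameters. A quick sanity check along those lines, offered as an assumption rather than part of the commit:

# Hypothetical check: count the trainable parameters of the network.
from src.nn import MLP

n = MLP(3, [ 4, 4, 1 ])
print(len(n.parameters()))  # expected: 4*(3+1) + 4*(4+1) + 1*(4+1) = 41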