add base module class
parent 5b0a350dd4
commit e49246ac2d
@@ -2,7 +2,17 @@ import random
 from .engine import Value
 from typing import List, Union

-class Neuron:
+
+class Module:
+    def zero_grad(self):
+        for p in self.parameters():
+            p.grad = 0.0
+
+    def parameters(self):
+        return []
+
+
+class Neuron(Module):
     def __init__(self, n_in: int):
         self.w = [Value(random.uniform(-1, 1)) for _ in range(n_in)]
         self.b = Value(random.uniform(-1, 1))
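The base class fixes a shared contract: subclasses override parameters() and inherit zero_grad() for free, and the base parameters() returning [] makes zero_grad() a safe no-op by default. A minimal usage sketch, assuming Neuron's forward pass is its __call__ (the act/tanh context in the next hunk suggests this) and that Value.backward() populates .grad:

    n = Neuron(3)
    x = [Value(1.0), Value(-2.0), Value(0.5)]
    out = n(x)       # forward: tanh(w . x + b)
    out.backward()   # autograd fills p.grad on w and b
    n.zero_grad()    # inherited from Module
    assert all(p.grad == 0.0 for p in n.parameters())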
@@ -16,10 +26,11 @@ class Neuron:
             raise ValueError(f"mismatch dimension: x: {len(x)}, w: {len(self.w)}")
         act = reduce(add, [w_i * x_i for w_i, x_i in zip(self.w, x)]) + self.b
         return act.tanh()

     def parameters(self):
         return self.w + [self.b]

-class Layer:
+
+class Layer(Module):
     def __init__(self, n_in: int, n_out: int):
         self.neurons = [Neuron(n_in) for _ in range(n_out)]
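The act line chains Value.__add__ pairwise via reduce(add, ...) (the reduce and add imports sit outside this hunk) instead of using sum(); sum() starts from the int 0, which routes through Value.__radd__. Assuming Value does support 0 + Value, as the training hunk below calling sum() on Values implies, the line is equivalent to the plain dot product plus bias:

    # equivalent form, assuming Value.__radd__ exists (sum starts at int 0)
    act = sum(w_i * x_i for w_i, x_i in zip(self.w, x)) + self.b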
@@ -35,7 +46,7 @@ class Layer:
         return out


-class MLP:
+class MLP(Module):
     def __init__(self, n_in: int, n_outs: List[int]):
         sizes = [n_in] + n_outs
         self.layers = []
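Inheriting from Module only makes Layer.zero_grad() and MLP.zero_grad() useful if each subclass overrides parameters(); the base implementation returns []. The overrides fall outside these hunks, but the usual shape, a hypothetical mirroring Neuron.parameters() above, is:

    def parameters(self):
        # flatten the parameters of every child; for MLP this walks
        # self.layers, for Layer it would walk self.neurons
        return [p for layer in self.layers for p in layer.parameters()]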
@@ -19,11 +19,10 @@ def model():
     loss = sum([(y_p - y_t)**2 for y_p, y_t in zip(y_pred, y_true)])

     # backward
-    for p in n.parameters():
-        p.grad = 0.0
+    n.zero_grad()
     loss.backward()

-    # update
+    # update (sgd)
     for p in n.parameters():
         p.data += -lr * p.grad

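With zero_grad() living on the model, a full training step in model() reads as sketched below. The data, network shape, and learning rate are illustrative; only the backward and update lines come from this diff, and n(x) assumes MLP is callable like Neuron:

    n = MLP(3, [4, 4, 1])
    lr = 0.05
    for step in range(100):
        y_pred = [n(x) for x in xs]                                  # forward
        loss = sum([(y_p - y_t)**2 for y_p, y_t in zip(y_pred, ys)])
        n.zero_grad()      # reset stale grads (replaces the manual loop)
        loss.backward()
        for p in n.parameters():                                     # sgd update
            p.data += -lr * p.grad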