finish micrograd main functions.
parent 4c9f7d8d7d
commit 5b0a350dd4
@@ -0,0 +1,28 @@
+# Base Image
+FROM nvidia/cuda:11.3.1-cudnn8-runtime-ubuntu20.04
+
+# System Dependencies and Cleanup
+RUN apt-get update -y && \
+    DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y tzdata && \
+    apt-get install -y software-properties-common ffmpeg libsm6 libxext6 libhdf5-serial-dev netcdf-bin libnetcdf-dev && \
+    add-apt-repository ppa:ubuntugis/ubuntugis-unstable && \
+    apt-get update && \
+    apt-get install -y curl build-essential gdal-bin libgdal-dev libpq-dev python3-gdal python3-pip apt-transport-https ca-certificates gnupg && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy only the necessary files
+COPY requirements.txt /micrograd/requirements.txt
+
+# Install Python Packages
+RUN pip install --no-cache-dir -r /micrograd/requirements.txt
+
+# Set Working Directory and Prepare App
+WORKDIR /micrograd
+COPY micrograd /micrograd/micrograd
+COPY test /micrograd/test
+COPY app.py /micrograd/app.py
+
+RUN mkdir -p /root/.cache/torch/hub/checkpoints/
+
+CMD ["python3", "app.py"]
@@ -0,0 +1,16 @@
+NAME=micrograd
+DOCKER=podman
+PORT=8008
+
+.PHONY: build rm run stop
+
+all: stop rm build run
+
+stop:
+	${DOCKER} stop ${NAME}
+rm:
+	${DOCKER} rm ${NAME}
+run:
+	${DOCKER} run -p ${PORT}:${PORT} -d --name ${NAME} -t ${NAME}
+build:
+	${DOCKER} build . --tag ${NAME}
@@ -0,0 +1,30 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from micrograd.train import model  # Replace with your ML model library
+from typing import List
+
+# Create FastAPI app
+app = FastAPI()
+
+# Load your ML model (replace this with your model loading logic)
+# model = engine.load_model("path/to/your/model")
+
+# Define a request model
+class Item(BaseModel):
+    data: List[float]  # Change 'list' to match the input format of your model
+
+# Endpoint for ML inference
+@app.post("/predict")
+async def predict(item: Item):
+    try:
+        # Perform prediction (modify this part according to your model's API)
+        nn = model()
+        prediction = nn(item.data)
+        return {"prediction": prediction.data}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+# Run the API with Uvicorn
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8008)
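A hypothetical client call for the /predict endpoint above, using only the standard library; it assumes the server is already running on localhost:8008 (e.g. via `make run` or `python3 app.py`). Note that model() retrains the MLP on every request, so a production setup would train once at startup and reuse the network.

import json
from urllib import request

payload = json.dumps({"data": [2.0, 3.0, -1.0]}).encode("utf-8")
req = request.Request(
    "http://localhost:8008/predict",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with request.urlopen(req) as resp:
    print(json.load(resp))  # e.g. {"prediction": 0.97}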
@@ -0,0 +1,18 @@
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.mkShell {
+  buildInputs = [
+    pkgs.python310
+    pkgs.python310Packages.numpy
+    pkgs.python310Packages.pytest
+    pkgs.python310Packages.uvicorn
+    pkgs.python310Packages.fastapi
+  ];
+
+  # Set PYTHONPATH to include your Python dependencies.
+  # This is useful if you are using Python modules that are not installed in the
+  # standard location.
+  # shellHook = ''
+  #   export PYTHONPATH=${pkgs.python3.sitePackages}:$PYTHONPATH
+  # '';
+}
@@ -1,4 +1,7 @@
 class Value:
+    """
+    Stores a single scalar value and its gradient.
+    """
 
     def __init__(self, data, _children=(), _op='', label=''):
         self.data = data
@@ -30,6 +33,7 @@ class Value:
         return f"Value(data={self.data})"
 
     def __add__(self, other):
+        other = other if isinstance(other, Value) else Value(other)
         out = Value(data=self.data + other.data,
                     _children=(self, other), _op='+')
 
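With this coercion in place (together with the __radd__ added in the next hunk), plain ints and floats mix with Value on either side of +. A quick sketch, assuming micrograd.engine is importable:

from micrograd.engine import Value

a = Value(2.0)
print((a + 1).data)  # 3.0: the right operand is wrapped into Value(1)
print((1 + a).data)  # 3.0: int.__add__ fails, so Python falls back to __radd__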
@@ -38,13 +42,37 @@ class Value:
             other.grad += 1.0 * out.grad
         out._backward = _backward
         return out
+
+    def __radd__(self, other):
+        return self + other
 
-    def __sub__(self, other):
-        out = Value(data=self.data - other.data,
-                    _children=(self, other), _op='-')
+    def __pow__(self, other):
+        assert isinstance(other, (int, float)), "int or float for now"
+        other = Value(other)
+        n = other.data
+        out = Value(data=self.data ** n,
+                    _children=(self, other), _op=f'**{n}')
+
+        def _backward():
+            self.grad += n * (self.data ** (n-1)) * out.grad
+        out._backward = _backward
+        return out
+
+    def exp(self):
+        from math import exp
+        x = self.data
+        out = Value(data=exp(x),
+                    _children=(self,), _op='exp')
+
+        def _backward():
+            self.grad += out.data * out.grad
+        out._backward = _backward
+        return out
+
+    def __sub__(self, other):
+        return self + (-1 * other)
 
     def __mul__(self, other):
         other = other if isinstance(other, Value) else Value(other)
         out = Value(data=self.data * other.data,
                     _children=(self, other), _op='*')
@@ -54,6 +82,12 @@ class Value:
         out._backward = _backward
         return out
 
+    def __rmul__(self, other):
+        return self * other
+
+    def __truediv__(self, other):
+        return self * other ** -1
+
     def tanh(self):
         from math import exp
         t = (exp(2 * self.data) - 1) / (exp(2 * self.data) + 1)
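A sketch (not part of the commit) of the new operators chained together: __truediv__ is just multiplication by a -1 power, so division needs no backward rule of its own. It assumes Value.backward() walks the graph, as the tests later in this commit exercise:

from micrograd.engine import Value

a = Value(4.0)
b = Value(2.0)
c = a / b       # desugars to a * b**-1
c.backward()
print(c.data)   # 2.0
print(a.grad)   # dc/da = 1/b = 0.5
print(b.grad)   # dc/db = -a/b**2 = -1.0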
@@ -0,0 +1,55 @@
+import random
+from .engine import Value
+from typing import List, Union
+
+class Neuron:
+    def __init__(self, n_in: int):
+        self.w = [Value(random.uniform(-1, 1)) for _ in range(n_in)]
+        self.b = Value(random.uniform(-1, 1))
+
+    def __call__(self, x) -> Value:
+        # w * x + b
+        from functools import reduce
+        from operator import add
+
+        if len(x) != len(self.w):
+            raise ValueError(f"mismatch dimension: x: {len(x)}, w: {len(self.w)}")
+        act = reduce(add, [w_i * x_i for w_i, x_i in zip(self.w, x)]) + self.b
+        return act.tanh()
+
+    def parameters(self):
+        return self.w + [self.b]
+
+class Layer:
+    def __init__(self, n_in: int, n_out: int):
+        self.neurons = [Neuron(n_in) for _ in range(n_out)]
+
+    def __call__(self, x) -> Union[List[Value], Value]:
+        out = [n(x) for n in self.neurons]
+        return out[0] if len(out) == 1 else out
+
+    def parameters(self):
+        out = []
+        for neuron in self.neurons:
+            for p in neuron.parameters():
+                out.append(p)
+        return out
+
+class MLP:
+    def __init__(self, n_in: int, n_outs: List[int]):
+        sizes = [n_in] + n_outs
+        self.layers = []
+        for i in range(len(n_outs)):
+            self.layers.append(Layer(sizes[i], sizes[i+1]))
+
+    def __call__(self, x):
+        for layer in self.layers:
+            x = layer(x)
+        return x
+
+    def parameters(self):
+        out = []
+        for layer in self.layers:
+            for p in layer.parameters():
+                out.append(p)
+        return out
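For orientation, a minimal sketch of the API above; the shapes mirror the ones used in test_nn.py further down:

from micrograd.nn import MLP

mlp = MLP(3, [4, 4, 1])       # 3 inputs, two hidden layers of 4, 1 output
y = mlp([2.0, 3.0, -1.0])     # a single Value, since the last layer has width 1
print(y.data)                 # a tanh output, so in (-1, 1)
print(len(mlp.parameters()))  # 41 = (3*4 + 4) + (4*4 + 4) + (4*1 + 1)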
@@ -0,0 +1,36 @@
+from micrograd.nn import MLP
+
+
+def model():
+    n = MLP(3, [4, 4, 1])
+    xs = [
+        [2.0, 3.0, -1.0],
+        [3.0, -1.0, 0.5],
+        [0.5, 1.0, 1.0],
+        [1.0, 1.0, 1.0],
+    ]
+    y_true = [1.0, -1.0, -1.0, 1.0]
+    epochs = 20
+    lr = 0.08
+    for epoch in range(epochs):
+        # forward
+        y_pred = [n(x) for x in xs]
+        loss = sum([(y_p - y_t)**2 for y_p, y_t in zip(y_pred, y_true)])
+
+        # backward
+        for p in n.parameters():
+            p.grad = 0.0
+        loss.backward()
+
+        # update
+        for p in n.parameters():
+            p.data += -lr * p.grad
+
+        print(f"{epoch=:02d} | loss: {loss.data:.4f}")
+    print(f"{[y.data for y in y_pred]}")
+    print(f"{[y for y in y_true]}")
+    return n
+
+
+if __name__ == "__main__":
+    model()
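Zeroing p.grad before loss.backward() matters because every _backward rule accumulates with +=, so stale gradients from the previous epoch would otherwise leak into the update. The accumulation behavior itself is the point of test_accumulation in the test suite:

from micrograd.engine import Value

a = Value(3.0)
b = a + a      # `a` appears twice in the graph
b.backward()
print(a.grad)  # 2.0: each use of `a` adds 1.0 into a.grad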
@@ -1,3 +1,5 @@
 pytest
 notebook
 graphviz
+fastapi
+uvicorn
@@ -2,16 +2,6 @@ import pytest
 from micrograd.engine import Value
 
 
-def test_backward_tanh():
-    # inputs
-    x = Value(0.8814)
-    y = x.tanh()
-    y.grad = 1.0
-    y._backward()
-
-    assert pytest.approx(x.grad, 0.1) == 0.5
-
-
 def test_large_backprop():
     # inputs
     x1 = Value(2.0, label='x1')
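The removed test seeded the output gradient by hand (y.grad = 1.0; y._backward()); the rewritten version further down relies on Value.backward() instead. That method's body is not shown in this diff, but in the usual micrograd formulation it topologically sorts the graph, seeds the root gradient with 1.0, and fires each node's _backward in reverse order. A sketch under that assumption (the _prev attribute name is a guess derived from the _children constructor argument):

def backward(self):
    # hypothetical reconstruction; the method body is not part of this commit
    topo, visited = [], set()

    def build(v):
        if v not in visited:
            visited.add(v)
            for child in v._prev:  # assumed attribute holding _children
                build(child)
            topo.append(v)

    build(self)
    self.grad = 1.0
    for node in reversed(topo):
        node._backward()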
@@ -107,3 +97,80 @@ def test_accumulation():
     b = a + a
     b.backward()
     assert a.grad == 2.0
+
+
+def test_backward_tanh():
+    # inputs
+    x = Value(0.8814)
+    y = x.tanh()
+    y.backward()
+
+    assert pytest.approx(x.grad, 0.1) == 0.5
+
+
+def test_backward_exp():
+    # inputs
+    x = Value(1.0)
+    y = x.exp()
+    y.backward()
+
+    assert pytest.approx(x.grad, 0.1) == 2.7
+
+
+def test_backward_pow():
+    # inputs
+    x = Value(1.0)
+    y = x ** 2
+    y.backward()
+
+    assert x.grad == 2.0
+
+
+def test_backward_div():
+    a = Value(4.0)
+    b = Value(2.0)
+    c = a / b
+    c.backward()
+    assert a.grad == 0.5
+
+
+def test_auto_diff_replace_tan_with_exp():
+    # inputs
+    x1 = Value(2.0, label='x1')
+    x2 = Value(0.0, label='x2')
+
+    # weights
+    w1 = Value(-3.0, label='w1')
+    w2 = Value(1.0, label='w2')
+
+    # bias
+    b = Value(6.8813735870195432, label='b')
+
+    h1 = x1 * w1
+    h1.label = 'h1'
+    h2 = x2 * w2
+    h2.label = 'h2'
+
+    h = h1 + h2
+    h.label = 'h'
+
+    n = h + b
+    n.label = 'n'
+    e = (2*n).exp()
+    y = (e - 1) / (e + 1)
+    y.label = 'y'
+
+    y.backward()
+    assert pytest.approx(n.grad, 0.001) == 0.5
+
+    assert pytest.approx(b.grad, 0.001) == 0.5
+    assert pytest.approx(h.grad, 0.001) == 0.5
+
+    assert pytest.approx(h1.grad, 0.001) == 0.5
+    assert pytest.approx(h2.grad, 0.001) == 0.5
+
+    assert pytest.approx(x1.grad, 0.001) == -1.5
+    assert pytest.approx(w1.grad, 0.001) == 1.0
+
+    assert pytest.approx(x2.grad, 0.001) == 0.5
+    assert pytest.approx(w2.grad, 0.001) == 0.0
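test_auto_diff_replace_tan_with_exp leans on the identity tanh(x) = (exp(2x) - 1) / (exp(2x) + 1), rebuilding tanh from exp so the backward pass only needs the exp, pow, mul, and add rules. A plain-float check of the identity:

from math import exp, tanh

x = 0.8814
assert abs(tanh(x) - (exp(2 * x) - 1) / (exp(2 * x) + 1)) < 1e-12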
@@ -0,0 +1,66 @@
+from micrograd.nn import Neuron, Layer, MLP
+from micrograd.engine import Value
+import pytest
+
+
+def test_init_neuron():
+    inputs = 2
+    x = [1.0, 0.0]
+    n = Neuron(inputs)
+    assert len(n.w) == inputs
+    y = n(x)
+    assert isinstance(y, Value)
+
+
+def test_mismatch_number():
+    with pytest.raises(ValueError):
+        x = [1.0, 0.0]
+        n = Neuron(7)
+        n(x)
+
+
+def test_large_n_in():
+    n_in = 100
+    x = [1.0] * n_in
+    n = Neuron(n_in)
+    y = n(x)
+    assert isinstance(y, Value)
+
+
+def test_well_known_weights():
+    x = [1.0, 0.0]
+    w = [Value(0.0), Value(0.0)]
+    b = Value(0.0)
+
+    n = Neuron(2)
+    n.w = w
+    n.b = b
+    y = n(x)
+
+    assert y.data == sum([x[0] * w[0], x[1] * w[1], b]).tanh().data
+
+
+def test_mlp():
+    x = [2.0, 3.0, -1.0]
+    n = MLP(3, [4, 4, 4])
+    y = n(x)
+    assert len(y) == 4
+
+
+def test_mlp_single_out():
+    x = [2.0, 3.0, -1.0]
+    n = MLP(3, [4, 4, 1])
+    y = n(x)
+    assert isinstance(y, Value)
+
+
+def test_sample_mlp():
+    n = MLP(3, [4, 4, 1])
+    xs = [
+        [2.0, 3.0, -1.0],
+        [3.0, -1.0, 0.5],
+        [0.5, 1.0, 1.0],
+        [1.0, 1.0, 1.0],
+    ]
+    y_true = [1.0, -1.0, -1.0, 1.0]
+    y_pred = [n(x) for x in xs]
+    mse = sum([(y_p - y_t)**2 for y_p, y_t in zip(y_pred, y_true)])
+    assert mse.data >= 0.0  # squared error is nonnegative; the loop had no assert
+
+
+def test_mlp_parameters():
+    n = MLP(3, [4, 4, 1])
+    assert len(n.parameters()) == 41
@@ -1,3 +1,4 @@
+import pytest
 from micrograd.engine import Value
 
 
@@ -11,21 +12,21 @@ def test_value_repr():
     assert "Value(data=2.0)" == repr(v)
 
 
-def test_value_add():
+def test_value_add_opt():
     v1 = Value(2.0)
     v2 = Value(4.0)
     assert (v1 + v2).data == 6.0
     assert "Value(data=6.0)" == repr(v1 + v2)
 
 
-def test_value_sub():
+def test_value_sub_opt():
     v1 = Value(2.0)
     v2 = Value(4.0)
     assert (v1 - v2).data == -2.0
     assert "Value(data=-2.0)" == repr(v1 - v2)
 
 
-def test_value_mul():
+def test_value_mul_opt():
     v1 = Value(2.0)
     v2 = Value(4.0)
     v3 = Value(-1.0)
@@ -33,6 +34,36 @@ def test_value_mul():
     assert (v1 * v3).data == -2.0
 
 
+def test_value_rmul_opt():
+    a = Value(2.0)
+    b = 2 * a
+    assert b.data == 4.0
+
+
+def test_value_pow_opt():
+    a = Value(2.0)
+    b = a ** 2
+    assert b.data == 4.0
+
+
+def test_value_exp_opt():
+    a = Value(1.0)
+    b = a.exp()
+    assert pytest.approx(b.data, 0.1) == 2.7
+
+
+def test_value_int_opt():
+    a = Value(2.0)
+    b = a - 1
+    assert b.data == 1.0
+
+
+def test_value_div_opt():
+    a = Value(2.0)
+    b = a / 2
+    assert b.data == 1.0
+
+
 def test_value_mul_add():
     v1 = Value(2.0)
     v2 = Value(4.0)