Commit 91e2b02c authored by Alejandro Riera

Coursera's logistic regression example written with PyTorch's autograd

parent 904ec895
"""
My attempt at reproducing Coursera's logistic regresion example with autograd
"""
import numpy as np
import torch
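
# Convention (following the Coursera notebook): X holds one example per
# column, i.e. X is (n_features, m) and Y is (1, m) with 0/1 labels.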


class LogRegModel:
    def __init__(self):
        self.w = None
        self.b = None

    def train(self, X, Y, epochs=1000, learning_rate=0.5):
        self.w = torch.zeros((X.shape[0], 1), requires_grad=True, dtype=torch.float64)
        self.b = torch.zeros((1, 1), requires_grad=True, dtype=torch.float64)
        m = X.shape[1]
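        # Each iteration: forward pass z = w^T X + b, sigmoid activation,
        # binary cross-entropy loss, backward pass via autograd, and a
        # manual gradient-descent step on w and b.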
        for i in range(epochs):
            z = self.w.transpose(0, 1).mm(X).add(self.b)
            A = torch.sigmoid(z)
            loss = Y.mul(A.log()) + (1 - Y).mul((1 - A).log())
            loss = loss.sum() / (-m)
            loss.backward()
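            # Update the parameters in-place; no_grad() keeps these ops
            # out of the autograd graph.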
            with torch.no_grad():
                self.w -= learning_rate * self.w.grad
                self.b -= learning_rate * self.b.grad
                # Manually zero the gradients after running the backward pass
                self.w.grad.zero_()
                self.b.grad.zero_()
            if i % 100 == 0:
                print("Loss after iteration %i: %f" % (i, loss.item()))

    def predict(self, X):
        with torch.no_grad():
            z = self.w.transpose(0, 1).mm(X).add(self.b)
            Y_pred = torch.sigmoid(z)
            # Threshold the probabilities at 0.5 to get hard 0/1 labels
            Y_pred[Y_pred < 0.5] = 0
            Y_pred[Y_pred >= 0.5] = 1
            return Y_pred

    def benchmark(self, X, Y):
        Y_pred = self.predict(X)
        # Mean absolute difference between 0/1 predictions and labels is the error rate
        error = np.mean(np.abs(Y_pred.numpy() - Y.numpy()))
        accuracy = 100 - 100 * error
        return accuracy
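

# Standalone versions of the same routines, mirroring the functional API of
# the original Coursera notebook.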
def predict(w, b, X):
    with torch.no_grad():
        z = w.transpose(0, 1).mm(X).add(b)
        Y_pred = torch.sigmoid(z)
        Y_pred[Y_pred < 0.5] = 0
        Y_pred[Y_pred >= 0.5] = 1
        return Y_pred


def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    w = torch.zeros((X_train.shape[0], 1), requires_grad=True, dtype=torch.float64)
    b = torch.zeros((1, 1), requires_grad=True, dtype=torch.float64)
    m = X_train.shape[1]
    for i in range(num_iterations):
        z = w.transpose(0, 1).mm(X_train).add(b)
        A = torch.sigmoid(z)
        loss = Y_train.mul(A.log()) + (1 - Y_train).mul((1 - A).log())
        loss = loss.sum() / (-m)
        loss.backward()
        with torch.no_grad():
            w -= learning_rate * w.grad
            b -= learning_rate * b.grad
            # Manually zero the gradients after running the backward pass
            w.grad.zero_()
            b.grad.zero_()
        if print_cost and i % 100 == 0:
            print("Loss after iteration %i: %f" % (i, loss.item()))
    Y_pred_train = predict(w, b, X_train)
    Y_pred_test = predict(w, b, X_test)
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_pred_train.numpy() - Y_train.numpy())) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_pred_test.numpy() - Y_test.numpy())) * 100))
    return w, b


if __name__ == "__main__":
    from coursera01w02.lr_utils import load_dataset

    train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
    # Flatten each image into a column vector: (m, h, w, c) -> (h*w*c, m)
    train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
    test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
    # Scale pixel values to [0, 1]
    train_set_x = train_set_x_flatten / 255.
    test_set_x = test_set_x_flatten / 255.
    train_set_x = torch.from_numpy(train_set_x).type(torch.float64)
    train_set_y = torch.from_numpy(train_set_y).type(torch.float64)
    test_set_x = torch.from_numpy(test_set_x).type(torch.float64)
    test_set_y = torch.from_numpy(test_set_y).type(torch.float64)
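    # Train and evaluate; the commented call below is the equivalent
    # functional entry point.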
    # d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=True)
    logreg = LogRegModel()
    logreg.train(train_set_x, train_set_y, epochs=2000, learning_rate=0.005)
    train_accuracy = logreg.benchmark(train_set_x, train_set_y)
    test_accuracy = logreg.benchmark(test_set_x, test_set_y)
    print(f"train accuracy: {train_accuracy} %")
    print(f"test accuracy: {test_accuracy} %")