import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
N = 10 # number of data points
m = .7
c = 0
x = np.linspace(0,2*np.pi,N)
y = m*x + c + np.random.normal(0,.3,x.shape)
plt.figure()
plt.plot(x,y,'o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('2D data (#data = %d)' % N)
plt.show()
import torch
from torch.utils.data import Dataset
class MyDataset(Dataset):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        # Each feature is [1, x] so the model can learn an intercept via the
        # constant first column; cast to float32 to match the model parameters.
        sample = {
            'feature': torch.tensor([1, self.x[idx]], dtype=torch.float32),
            'label': torch.tensor([self.y[idx]], dtype=torch.float32)}
        return sample
dataset = MyDataset(x, y)
for i in range(len(dataset)):
    sample = dataset[i]
    print(i, sample['feature'], sample['label'])
from torch.utils.data import DataLoader
dataset = MyDataset(x, y)
batch_size = 4
shuffle = True
num_workers = 4
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
import pprint as pp
for i_batch, samples in enumerate(dataloader):
    print('\nbatch# = %s' % i_batch)
    print('samples: ')
    pp.pprint(samples)
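As a quick sanity check (a small added snippet), the DataLoader stacks the per-sample tensors, so features should come out with shape [batch_size, 2] and labels with shape [batch_size, 1]; with 10 samples and a batch size of 4, the last of the three batches holds only the 2 leftover samples.
batch = next(iter(dataloader))
print(batch['feature'].shape)  # expected: torch.Size([4, 2])
print(batch['label'].shape)    # expected: torch.Size([4, 1])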
import torch.nn as nn
from torch.nn.parameter import Parameter
class MyModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(MyModel, self).__init__()
        self.weight = Parameter(torch.Tensor(output_dim, input_dim))
        self.bias = Parameter(torch.Tensor(output_dim, 1))
        stdv = 1.
        self.weight.data.uniform_(-stdv, stdv)
        self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x):
        # Stack the parameters into a single (output_dim, input_dim + 1) matrix
        # and multiply by the design matrix x, whose rows are [1, x_i]. With this
        # column ordering, `weight` multiplies the constant column (the intercept)
        # and `bias` multiplies x (the slope).
        weight_and_bias = torch.cat((self.weight, self.bias), 1)
        out = x.matmul(weight_and_bias.t())
        return out
input_dim = 1
output_dim = 1
model = MyModel(input_dim, output_dim)
model(torch.rand([5,2]))
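A quick shape check: since forward multiplies the input by the transposed two-column parameter matrix, a [5, 2] input should produce a [5, 1] output.
out = model(torch.rand([5, 2]))
print(out.shape)  # expected: torch.Size([5, 1])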
Define the cost function, often called loss or error.
cost = nn.MSELoss()
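To see what nn.MSELoss computes (the mean of the squared differences), here is a tiny made-up example:
toy_prediction = torch.tensor([[1.0], [2.0]])
toy_target = torch.tensor([[0.0], [4.0]])
print(cost(toy_prediction, toy_target))  # ((1-0)^2 + (2-4)^2) / 2 = 2.5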
Now fit the model to the data, in other words training (or learning from data).
num_epochs = 100 # How many times the entire training data is seen?
l_rate = 0.01
optimiser = torch.optim.SGD(model.parameters(), lr = l_rate)
dataset = MyDataset(x, y)
batch_size = 4
shuffle = True
num_workers = 4
training_sample_generator = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
for epoch in range(num_epochs):
    if epoch % 10 == 0:
        print('Epoch = %s' % epoch)
    for batch_i, samples in enumerate(training_sample_generator):
        predictions = model(samples['feature'])
        error = cost(predictions, samples['label'])
        if epoch % 10 == 0:
            print('\tBatch = %s, Error = %s' % (batch_i, error.item()))
        # Before the backward pass, use the optimiser object to zero all of the
        # gradients for the variables it will update (the learnable weights of
        # the model). This is because, by default, gradients are accumulated in
        # buffers (i.e. not overwritten) whenever .backward() is called. See the
        # docs of torch.autograd.backward for more details.
        optimiser.zero_grad()
        # Backward pass: compute the gradient of the loss with respect to the
        # model parameters.
        error.backward()
        # Calling step() on the optimiser updates the parameters.
        optimiser.step()
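After training, the learned parameters should be close to the true slope m = 0.7 and intercept c = 0 used to generate the data. Because the constant column of the feature vector comes first and `weight` is concatenated before `bias` in forward, `model.weight` plays the role of the intercept and `model.bias` the role of the slope here.
print('learned intercept (model.weight): %.3f' % model.weight.item())
print('learned slope (model.bias): %.3f' % model.bias.item())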
x_for_plotting = np.linspace(0, 2*np.pi, 1000)
design_matrix = torch.tensor(np.vstack([np.ones(x_for_plotting.shape), x_for_plotting]).T, dtype=torch.float32)
#print('Design matrix:\n', design_matrix)
print('Design matrix shape:', design_matrix.shape)
y_for_plotting = model(design_matrix)
print('y_for_plotting shape:', y_for_plotting.shape)
plt.figure()
plt.plot(x,y,'o')
plt.plot(x_for_plotting, y_for_plotting.data.numpy(), 'r-')
plt.xlabel('x')
plt.ylabel('y')
plt.title('2D data (#data = %d)' % N)
plt.show()