import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
N = 10 # number of data points
m = .9
c = 1
x = np.linspace(0,2*np.pi,N)
y = m*x + c + np.random.normal(0,.3,x.shape)
plt.figure()
plt.plot(x,y,'o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('2D data (#data = %d)' % N)
plt.show()
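Before fitting with gradient descent, it can help to have a closed-form baseline to compare against. The sketch below uses np.polyfit (added here for illustration, not part of the original pipeline) to recover the slope and intercept by ordinary least squares; the estimates should land near the true m = 0.9 and c = 1.

# Closed-form least-squares fit as a sanity baseline (illustrative only)
m_hat, c_hat = np.polyfit(x, y, deg=1)
print('least-squares estimate: m = %.3f, c = %.3f' % (m_hat, c_hat))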
import torch
from torch.utils.data import Dataset
class MyDataset(Dataset):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        # Cast to float32 so samples match nn.Linear's default dtype;
        # x and y are float64 numpy arrays, which would otherwise cause a
        # dtype mismatch in the forward pass.
        sample = {
            'feature': torch.tensor([self.x[idx]], dtype=torch.float32),
            'label': torch.tensor([self.y[idx]], dtype=torch.float32)}
        return sample
dataset = MyDataset(x, y)
for i in range(len(dataset)):
    sample = dataset[i]
    print(i, sample['feature'], sample['label'])
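For simple arrays like these, PyTorch's built-in TensorDataset gives the same behaviour without a custom class. The sketch below (which returns tuples rather than dicts) is an equivalent alternative, shown only for comparison.

from torch.utils.data import TensorDataset
alt_dataset = TensorDataset(
    torch.tensor(x, dtype=torch.float32).unsqueeze(1),
    torch.tensor(y, dtype=torch.float32).unsqueeze(1))
feature, label = alt_dataset[0]  # a tuple instead of a dict
print(feature, label)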
from torch.utils.data import DataLoader
dataset = MyDataset(x, y)
batch_size = 4
shuffle = True
num_workers = 4
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
import pprint as pp
for i_batch, samples in enumerate(dataloader):
    print('\nbatch# = %s' % i_batch)
    print('samples: ')
    pp.pprint(samples)
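Each batch is a dict of stacked tensors: the default collate function stacks the per-sample tensors along a new leading batch dimension. A quick shape check (illustrative):

batch = next(iter(dataloader))
print(batch['feature'].shape)  # torch.Size([4, 1]) for a full batch
print(batch['label'].shape)    # torch.Size([4, 1])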
import torch.nn as nn
import torch.nn.functional as F
class MyModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(MyModel, self).__init__()
        # A single linear layer computes y = Wx + b, exactly the model
        # we are trying to fit.
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out
input_dim = 1
output_dim = 1
model = MyModel(input_dim, output_dim)
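The freshly constructed model starts from random weights. Printing them now (an illustrative sketch) makes it easy to compare against the values learned after training.

for name, param in model.named_parameters():
    print(name, param.data)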
Next we need a cost function, often called the loss or error, which measures how far the model's predictions are from the labels. Here we use mean squared error.
cost = nn.MSELoss()
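nn.MSELoss averages the squared differences between predictions and labels. A quick hand computation on dummy tensors (illustrative) confirms what it returns.

pred = torch.tensor([[1.0], [2.0]])
target = torch.tensor([[0.0], [4.0]])
print(cost(pred, target))             # tensor(2.5000)
print(((pred - target) ** 2).mean())  # same value, computed by hand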
Now we fit the model to the data, in other words training (or learning from data), by minimising the cost with stochastic gradient descent.
num_epochs = 10 # how many times the entire training set is seen
l_rate = 0.01
optimiser = torch.optim.SGD(model.parameters(), lr = l_rate)
dataset = MyDataset(x, y)
batch_size = 4
shuffle = True
num_workers = 4
training_sample_generator = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
for epoch in range(num_epochs):
    print('Epoch = %s' % epoch)
    for batch_i, samples in enumerate(training_sample_generator):
        predictions = model(samples['feature'])
        error = cost(predictions, samples['label'])
        print('\tBatch = %s, Error = %s' % (batch_i, error.item()))
        # Before the backward pass, use the optimiser object to zero all of
        # the gradients for the tensors it will update (the learnable
        # weights of the model). This is because, by default, gradients are
        # accumulated in buffers (i.e. not overwritten) whenever .backward()
        # is called. Check out the docs of torch.autograd.backward for more
        # details.
        optimiser.zero_grad()
        # Backward pass: compute the gradient of the loss with respect to
        # the model parameters.
        error.backward()
        # Calling step() on an optimiser updates the parameters it manages.
        optimiser.step()
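After training, the learned weight and bias should be close to the true slope m = 0.9 and intercept c = 1 used to generate the data (exact values vary with the random noise and the initialisation).

print('learned m:', model.linear.weight.item())
print('learned c:', model.linear.bias.item())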
x_for_plotting = np.linspace(0, 2*np.pi, 1000)
# Shape (1000, 1): one feature per row, matching the model's input_dim.
design_matrix = torch.tensor(x_for_plotting, dtype=torch.float32).unsqueeze(1)
y_for_plotting = model(design_matrix)  # call the model, not .forward() directly
print('y_for_plotting shape:', y_for_plotting.shape)
plt.figure()
plt.plot(x,y,'o')
plt.plot(x_for_plotting, y_for_plotting.detach().numpy(), 'r-')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Data and fitted line (#data = %d)' % N)
plt.show()