from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
# Download the MNIST training split into the current directory.
mnist = datasets.MNIST(root='.', train=True, download=True)
# `.data` is the current attribute name; `.train_data` is deprecated in
# torchvision and only aliases `.data` with a warning.
print("Number of training examples", mnist.data.shape)
print("Image information", mnist[0])  # (PIL image, integer label) tuple
import matplotlib.pyplot as plt
# '%matplotlib inline' is IPython notebook magic, not valid Python syntax;
# kept as a comment so this file runs as a plain script.
# %matplotlib inline
plt.imshow(mnist[0][0])  # mnist[0][0] is a PIL image; imshow accepts it directly
class Net(nn.Module):
    """Single-layer linear classifier for 28x28 MNIST images.

    Flattens each image to a 784-vector, applies one fully-connected
    layer, and returns per-class log-probabilities (shape: batch x 10).
    """

    def __init__(self):
        super(Net, self).__init__()
        # One linear layer mapping flattened pixels to the 10 digit classes.
        self.fully = nn.Sequential(
            nn.Linear(28 * 28, 10)
        )

    def forward(self, x):
        # Collapse any leading dimensions into a batch of flat 784-vectors.
        flat = x.reshape(-1, 28 * 28)
        logits = self.fully(flat)
        # Log-probabilities, to be consumed by F.nll_loss during training.
        return F.log_softmax(logits, dim=1)
# Training batches of 64, reshuffled each epoch.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='.', train=True,
                   transform=transforms.Compose([transforms.ToTensor()])),
    batch_size=64, shuffle=True)
# Evaluate in batches as well: batch_size=1 was needlessly slow (10k forward
# passes of a single sample). Shuffling is pointless for evaluation — the
# accuracy computation below is order-independent.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='.', train=False,
                   transform=transforms.Compose([transforms.ToTensor()])),
    batch_size=64, shuffle=False)
def train():
    """Train a fresh Net on MNIST and return it.

    Reads the module-level ``train_loader``. Optimizes NLL loss (the net
    outputs log-probabilities) with Adam at a fixed learning rate for a
    fixed number of epochs, logging the loss every 100 batches.

    Returns:
        Net: the trained network.
    """
    learning_rate = 1e-3
    num_epochs = 3
    net = Net()
    net.train()  # explicit: ensure training-mode behavior (dropout/batchnorm)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    for epoch in range(num_epochs):
        for batch_idx, (data, target) in enumerate(train_loader):
            output = net(data)
            loss = F.nll_loss(output, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                # %d for the integer epoch — %f printed it as "0.000000"
                print('Epoch = %d. Batch = %s. Loss = %s' % (epoch, batch_idx, loss.item()))
    return net
net = train()
net.eval()  # switch to inference mode (dropout/batchnorm) for evaluation
correct = 0
total = 0
# no_grad: inference only — skip autograd bookkeeping (removed the unused
# `test_loss` accumulator that was never updated or printed).
with torch.no_grad():
    for data, target in test_loader:
        total += len(target)
        output = net(data)
        pred = output.max(1, keepdim=True)[1]  # index of max log-probability
        correct += target.eq(pred.view_as(target)).sum()
print("Correct out of %s" % total, correct.item())
# Use the counted total instead of the hard-coded 10000, so the figure stays
# correct if the test set size ever changes.
print("Percentage accuracy", correct.item() * 100.0 / total)