%% Cell type:code id: tags:
``` python
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torchsummary import summary
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
from models import *
%matplotlib inline
```
%% Cell type:code id: tags:
``` python
# hyper-parameters
# how many samples per batch to load
batch_size = 100
# percentage of training set to use as validation
valid_size = 0.1
# number of epochs to train the model
n_epochs = 30
# minimum validation loss seen so far (used for checkpointing)
valid_loss_min = np.inf
# specify the image classes
classes = ['noise', 'wave']
# gpu
DEVICE = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
```
%% Cell type:code id: tags:
``` python
# choose the training and test datasets
train_set = pd.read_csv("./output/train.csv", dtype=np.float32)
# separate the features and labels
total_train_label = train_set.label.values
total_train_data = train_set.loc[:, train_set.columns != 'label'].values
total_train_data = total_train_data.reshape(-1, 1, 4096)
# split into training and validation sets
data_train, data_valid, label_train, label_valid = train_test_split(total_train_data, total_train_label, test_size=0.1, random_state=2)
```
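%% Cell type:markdown id: tags:
A quick sanity check on the split (a minimal sketch; it assumes the CSV layout above, i.e. one `label` column plus 4096 sample columns per row):
%% Cell type:code id: tags:
``` python
# shapes only; no new data is created
print('train:', data_train.shape, label_train.shape)
print('valid:', data_valid.shape, label_valid.shape)
# labels should be binary: 0 = noise, 1 = wave
print('label values:', np.unique(total_train_label))
```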
%% Cell type:code id: tags:
``` python
# create feature and target tensors for the training set (labels must be LongTensor)
dataTrain = torch.from_numpy(data_train)
labelTrain = torch.from_numpy(label_train).type(torch.LongTensor) # data type is long
# create feature and target tensors for the validation set
dataValid = torch.from_numpy(data_valid)
labelValid = torch.from_numpy(label_valid).type(torch.LongTensor) # data type is long
```
%% Cell type:code id: tags:
``` python
# PyTorch train and valid datasets
train = torch.utils.data.TensorDataset(dataTrain, labelTrain)
valid = torch.utils.data.TensorDataset(dataValid, labelValid)
# data loaders (no need to shuffle validation data)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)
```
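%% Cell type:markdown id: tags:
Peeking at one batch confirms what the model will receive (a minimal check; shapes assume the reshape to `(-1, 1, 4096)` above):
%% Cell type:code id: tags:
``` python
# draw a single batch from the training loader
xb, yb = next(iter(train_loader))
print(xb.shape, xb.dtype)  # expected: torch.Size([100, 1, 4096]) torch.float32
print(yb.shape, yb.dtype)  # expected: torch.Size([100]) torch.int64
```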
%% Cell type:code id: tags:
``` python
# instantiate model
model = ConvNet4().to(DEVICE)
# specify optimizer
optimizer = optim.Adam(model.parameters(), lr=5e-5)
# learning-rate schedule: decay lr by 10x every 20 epochs
lr_sched = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
# uncomment to print a layer-by-layer summary
#summary(model, input_size=(1,4096))
```
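%% Cell type:markdown id: tags:
`ConvNet4` comes from `models.py`, which is not part of this file. For reference only, here is a minimal 1-D CNN sketch with the same interface contract (input `(batch, 1, 4096)`, sigmoid output `(batch, 1)` to match the `F.binary_cross_entropy` loss below). It is hypothetical; the real `ConvNet4` will differ in depth and filter sizes:
%% Cell type:code id: tags:
``` python
# Hypothetical stand-in for ConvNet4 (the real model lives in models.py).
class ConvNet4Sketch(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv1d(1, 16, kernel_size=16, stride=2), nn.ReLU(), nn.MaxPool1d(4),
            nn.Conv1d(16, 32, kernel_size=8, stride=2), nn.ReLU(), nn.MaxPool1d(4),
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.LazyLinear(1),  # infers the flattened size on the first forward pass
            nn.Sigmoid(),      # probability in [0, 1] for binary_cross_entropy
        )

    def forward(self, x):
        return self.classifier(self.features(x))
```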
%% Cell type:code id: tags:
``` python
# lists for the loss/accuracy curves
train_Loss_list = []
valid_Loss_list = []
accuracy_list = []
valid_len = 1  # run validation every valid_len training batches
```
%% Cell type:code id: tags:
``` python
# train
for epoch in range(0, n_epochs):
    # track running training and validation loss
    train_loss = 0.0
    valid_loss = 0.0
    # train the model
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(DEVICE), target.to(DEVICE).float().reshape(-1, 1)
        optimizer.zero_grad()
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * data.size(0)
        # run validation every valid_len training batches
        if (batch_idx + 1) % valid_len == 0:
            class_correct = list(0. for i in range(2))
            class_total = list(0. for i in range(2))
            # validate the model
            model.eval()
            with torch.no_grad():
                for data, target in valid_loader:
                    data, target = data.to(DEVICE), target.to(DEVICE).float().reshape(-1, 1)
                    output = model(data)
                    loss = F.binary_cross_entropy(output, target)
                    valid_loss += loss.item() * data.size(0)
                    # lowering the 0.5 threshold (e.g. to 0.1) raises recall at the cost of precision
                    pred = (output >= 0.5).long()
                    # compare predictions to the true labels
                    correct_tensor = pred.eq(target.data.view_as(pred).long())
                    correct = np.squeeze(correct_tensor.cpu().numpy())
                    # tally correct predictions per class
                    for i in range(target.size(0)):
                        label = target.data[i].int()
                        class_correct[label] += correct[i].item()
                        class_total[label] += 1
            model.train()  # switch back to training mode for the next batch
            # overall validation accuracy
            accuracy = 100. * np.sum(class_correct) / np.sum(class_total)
            # average losses over the interval / validation set
            train_loss = train_loss / (valid_len * batch_size)
            valid_loss = valid_loss / len(valid_loader.dataset)
            # curve data
            train_Loss_list.append(train_loss)
            valid_Loss_list.append(valid_loss)
            accuracy_list.append(accuracy)
            # print training/validation statistics
            if (epoch * 10 + (batch_idx + 1) / valid_len) % 5 == 0:
                print('iteration: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
                    epoch * 10 + (batch_idx + 1) / valid_len, train_loss, valid_loss))
            # save the model if validation loss has decreased
            if valid_loss <= valid_loss_min:
                print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                    valid_loss_min, valid_loss))
                torch.save(model.state_dict(), './param/exp1_data1.2_convnet4.pt')
                valid_loss_min = valid_loss
            train_loss = 0.0
            valid_loss = 0.0
    # step the learning-rate scheduler once per epoch
    lr_sched.step()
```
%% Output
Validation loss decreased (inf --> 0.697138). Saving model ...
iteration: 5.0 Training Loss: 0.701615 Validation Loss: 0.699479
Validation loss decreased (0.697138 --> 0.692862). Saving model ...
Validation loss decreased (0.692862 --> 0.691105). Saving model ...
Validation loss decreased (0.691105 --> 0.690365). Saving model ...
Validation loss decreased (0.690365 --> 0.690097). Saving model ...
Validation loss decreased (0.690097 --> 0.689668). Saving model ...
Validation loss decreased (0.689668 --> 0.689384). Saving model ...
Validation loss decreased (0.689384 --> 0.688576). Saving model ...
iteration: 15.0 Training Loss: 0.689572 Validation Loss: 0.691786
Validation loss decreased (0.688576 --> 0.687145). Saving model ...
Validation loss decreased (0.687145 --> 0.686666). Saving model ...
iteration: 25.0 Training Loss: 0.665954 Validation Loss: 0.685901
Validation loss decreased (0.686666 --> 0.685901). Saving model ...
Validation loss decreased (0.685901 --> 0.685452). Saving model ...
Validation loss decreased (0.685452 --> 0.684345). Saving model ...
Validation loss decreased (0.684345 --> 0.684260). Saving model ...
iteration: 35.0 Training Loss: 0.670146 Validation Loss: 0.682974
Validation loss decreased (0.684260 --> 0.682974). Saving model ...
Validation loss decreased (0.682974 --> 0.682927). Saving model ...
Validation loss decreased (0.682927 --> 0.682300). Saving model ...
Validation loss decreased (0.682300 --> 0.681379). Saving model ...
Validation loss decreased (0.681379 --> 0.681055). Saving model ...
Validation loss decreased (0.681055 --> 0.679306). Saving model ...
Validation loss decreased (0.679306 --> 0.678713). Saving model ...
iteration: 45.0 Training Loss: 0.690382 Validation Loss: 0.677494
Validation loss decreased (0.678713 --> 0.677494). Saving model ...
Validation loss decreased (0.677494 --> 0.674677). Saving model ...
iteration: 55.0 Training Loss: 0.646271 Validation Loss: 0.673287
Validation loss decreased (0.674677 --> 0.673287). Saving model ...
Validation loss decreased (0.673287 --> 0.673059). Saving model ...
Validation loss decreased (0.673059 --> 0.672390). Saving model ...
Validation loss decreased (0.672390 --> 0.671886). Saving model ...
Validation loss decreased (0.671886 --> 0.671358). Saving model ...
Validation loss decreased (0.671358 --> 0.670887). Saving model ...
Validation loss decreased (0.670887 --> 0.670519). Saving model ...
Validation loss decreased (0.670519 --> 0.670456). Saving model ...
Validation loss decreased (0.670456 --> 0.669588). Saving model ...
iteration: 65.0 Training Loss: 0.649845 Validation Loss: 0.668721
Validation loss decreased (0.669588 --> 0.668721). Saving model ...
Validation loss decreased (0.668721 --> 0.667928). Saving model ...
Validation loss decreased (0.667928 --> 0.667196). Saving model ...
Validation loss decreased (0.667196 --> 0.666568). Saving model ...
Validation loss decreased (0.666568 --> 0.664699). Saving model ...
iteration: 75.0 Training Loss: 0.635979 Validation Loss: 0.663701
Validation loss decreased (0.664699 --> 0.663701). Saving model ...
Validation loss decreased (0.663701 --> 0.661460). Saving model ...
Validation loss decreased (0.661460 --> 0.660418). Saving model ...
Validation loss decreased (0.660418 --> 0.660231). Saving model ...
Validation loss decreased (0.660231 --> 0.659972). Saving model ...
Validation loss decreased (0.659972 --> 0.659714). Saving model ...
Validation loss decreased (0.659714 --> 0.659505). Saving model ...
Validation loss decreased (0.659505 --> 0.658895). Saving model ...
Validation loss decreased (0.658895 --> 0.657738). Saving model ...
iteration: 85.0 Training Loss: 0.613943 Validation Loss: 0.654649
Validation loss decreased (0.657738 --> 0.654649). Saving model ...
Validation loss decreased (0.654649 --> 0.653239). Saving model ...
Validation loss decreased (0.653239 --> 0.651977). Saving model ...
Validation loss decreased (0.651977 --> 0.650306). Saving model ...
Validation loss decreased (0.650306 --> 0.649231). Saving model ...
Validation loss decreased (0.649231 --> 0.648498). Saving model ...
Validation loss decreased (0.648498 --> 0.648056). Saving model ...
Validation loss decreased (0.648056 --> 0.646154). Saving model ...
iteration: 95.0 Training Loss: 0.607578 Validation Loss: 0.647569
Validation loss decreased (0.646154 --> 0.643473). Saving model ...
Validation loss decreased (0.643473 --> 0.642014). Saving model ...
Validation loss decreased (0.642014 --> 0.639245). Saving model ...
Validation loss decreased (0.639245 --> 0.636666). Saving model ...
Validation loss decreased (0.636666 --> 0.636232). Saving model ...
Validation loss decreased (0.636232 --> 0.632544). Saving model ...
iteration: 105.0 Training Loss: 0.583273 Validation Loss: 0.636602
Validation loss decreased (0.632544 --> 0.628861). Saving model ...
Validation loss decreased (0.628861 --> 0.623915). Saving model ...
Validation loss decreased (0.623915 --> 0.623855). Saving model ...
Validation loss decreased (0.623855 --> 0.620961). Saving model ...
iteration: 115.0 Training Loss: 0.541681 Validation Loss: 0.617632
Validation loss decreased (0.620961 --> 0.617632). Saving model ...
Validation loss decreased (0.617632 --> 0.617007). Saving model ...
Validation loss decreased (0.617007 --> 0.614840). Saving model ...
Validation loss decreased (0.614840 --> 0.609495). Saving model ...
iteration: 125.0 Training Loss: 0.570770 Validation Loss: 0.602589
Validation loss decreased (0.609495 --> 0.602589). Saving model ...
Validation loss decreased (0.602589 --> 0.598895). Saving model ...
Validation loss decreased (0.598895 --> 0.596339). Saving model ...
Validation loss decreased (0.596339 --> 0.590676). Saving model ...
iteration: 135.0 Training Loss: 0.502801 Validation Loss: 0.589131
Validation loss decreased (0.590676 --> 0.589131). Saving model ...
Validation loss decreased (0.589131 --> 0.584508). Saving model ...
Validation loss decreased (0.584508 --> 0.581870). Saving model ...
Validation loss decreased (0.581870 --> 0.581686). Saving model ...
Validation loss decreased (0.581686 --> 0.572193). Saving model ...
iteration: 145.0 Training Loss: 0.460022 Validation Loss: 0.571146
Validation loss decreased (0.572193 --> 0.571146). Saving model ...
Validation loss decreased (0.571146 --> 0.570640). Saving model ...
Validation loss decreased (0.570640 --> 0.566255). Saving model ...
Validation loss decreased (0.566255 --> 0.565251). Saving model ...
Validation loss decreased (0.565251 --> 0.556785). Saving model ...
Validation loss decreased (0.556785 --> 0.556608). Saving model ...
iteration: 155.0 Training Loss: 0.465487 Validation Loss: 0.556718
Validation loss decreased (0.556608 --> 0.554633). Saving model ...
Validation loss decreased (0.554633 --> 0.549326). Saving model ...
Validation loss decreased (0.549326 --> 0.541815). Saving model ...
Validation loss decreased (0.541815 --> 0.541233). Saving model ...
Validation loss decreased (0.541233 --> 0.538043). Saving model ...
iteration: 165.0 Training Loss: 0.415810 Validation Loss: 0.544216
Validation loss decreased (0.538043 --> 0.532060). Saving model ...
Validation loss decreased (0.532060 --> 0.529136). Saving model ...
Validation loss decreased (0.529136 --> 0.523866). Saving model ...
iteration: 175.0 Training Loss: 0.457607 Validation Loss: 0.534893
Validation loss decreased (0.523866 --> 0.514266). Saving model ...
Validation loss decreased (0.514266 --> 0.510827). Saving model ...
Validation loss decreased (0.510827 --> 0.510586). Saving model ...
Validation loss decreased (0.510586 --> 0.506574). Saving model ...
iteration: 185.0 Training Loss: 0.400222 Validation Loss: 0.526907
Validation loss decreased (0.506574 --> 0.502040). Saving model ...
Validation loss decreased (0.502040 --> 0.498628). Saving model ...
Validation loss decreased (0.498628 --> 0.491324). Saving model ...
Validation loss decreased (0.491324 --> 0.487613). Saving model ...
iteration: 195.0 Training Loss: 0.364942 Validation Loss: 0.486138
Validation loss decreased (0.487613 --> 0.486138). Saving model ...
Validation loss decreased (0.486138 --> 0.477574). Saving model ...
Validation loss decreased (0.477574 --> 0.477349). Saving model ...
Validation loss decreased (0.477349 --> 0.477175). Saving model ...
Validation loss decreased (0.477175 --> 0.477044). Saving model ...
iteration: 205.0 Training Loss: 0.346965 Validation Loss: 0.478333
iteration: 215.0 Training Loss: 0.316834 Validation Loss: 0.483181
Validation loss decreased (0.477044 --> 0.476688). Saving model ...
Validation loss decreased (0.476688 --> 0.476429). Saving model ...
iteration: 225.0 Training Loss: 0.385469 Validation Loss: 0.475942
Validation loss decreased (0.476429 --> 0.475942). Saving model ...
iteration: 235.0 Training Loss: 0.317920 Validation Loss: 0.480409
iteration: 245.0 Training Loss: 0.344972 Validation Loss: 0.478053
Validation loss decreased (0.475942 --> 0.475771). Saving model ...
Validation loss decreased (0.475771 --> 0.475147). Saving model ...
Validation loss decreased (0.475147 --> 0.474080). Saving model ...
Validation loss decreased (0.474080 --> 0.473306). Saving model ...
Validation loss decreased (0.473306 --> 0.473266). Saving model ...
iteration: 255.0 Training Loss: 0.351883 Validation Loss: 0.473462
iteration: 265.0 Training Loss: 0.326490 Validation Loss: 0.477889
Validation loss decreased (0.473266 --> 0.473055). Saving model ...
Validation loss decreased (0.473055 --> 0.472409). Saving model ...
Validation loss decreased (0.472409 --> 0.471368). Saving model ...
iteration: 275.0 Training Loss: 0.327591 Validation Loss: 0.472794
iteration: 285.0 Training Loss: 0.363089 Validation Loss: 0.472628
Validation loss decreased (0.471368 --> 0.470942). Saving model ...
Validation loss decreased (0.470942 --> 0.470312). Saving model ...
iteration: 295.0 Training Loss: 0.367475 Validation Loss: 0.470715
Validation loss decreased (0.470312 --> 0.469481). Saving model ...
Validation loss decreased (0.469481 --> 0.469162). Saving model ...
%% Cell type:code id: tags:
``` python
length = len(train_Loss_list)
x = range(0, length)
plt.plot(x, train_Loss_list, label='train loss')
plt.plot(x, valid_Loss_list, label='valid loss')
plt.title('Convolutional Neural Network')
plt.xlabel('iteration')
plt.ylabel('Loss')
plt.legend()
plt.savefig("cnn_loss.jpg")
plt.show()
```
%% Output
%% Cell type:code id: tags:
``` python
length = len(accuracy_list)
x = range(0, length)
plt.plot(x, accuracy_list)
plt.title('Convolutional Neural Network')
plt.xlabel('iteration')
plt.ylabel('accuracy')
plt.savefig("cnn_accuracy.jpg")
plt.show()
```
%% Output
%% Cell type:code id: tags:
``` python
#np.savetxt('./cache/new/data_1.2_convnet1/accracy.txt', accuracy_list, fmt="%.6f", delimiter="\n")
#np.savetxt('./cache/new/data_1.2_convnet1/train_Loss_list.txt', train_Loss_list, fmt="%.6f", delimiter="\n")
#np.savetxt('./cache/new/data_1.2_convnet1/valid_Loss_list.txt', valid_Loss_list, fmt="%.6f", delimiter="\n")
```
%% Cell type:code id: tags:
``` python
# reload the best checkpoint saved during training
model.load_state_dict(torch.load('./param/exp1_data1.2_convnet4.pt', map_location=DEVICE))
```
%% Output
<All keys matched successfully>
%% Cell type:code id: tags:
``` python
test_set = pd.read_csv("./output/test.csv", dtype=np.float32)
# separate the features and labels
label_test = test_set.label.values
data_test = test_set.loc[:, test_set.columns != 'label'].values
data_test = data_test.reshape(-1, 1, 4096)
# create feature and targets tensor for test set.
dataTest = torch.from_numpy(data_test)
labelTest = torch.from_numpy(label_test).type(torch.LongTensor) # data type is long
test = torch.utils.data.TensorDataset(dataTest, labelTest)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)  # evaluation: no shuffling needed
```
%% Cell type:code id: tags:
``` python
# track test loss
test_loss = 0.0
class_correct = list(0. for i in range(2))
class_total = list(0. for i in range(2))
model.eval()
# iterate over the test data
with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(DEVICE), target.to(DEVICE).float().reshape(-1, 1)
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        test_loss += loss.item() * data.size(0)
        # lowering the 0.5 threshold (e.g. to 0.1) raises recall at the cost of precision
        pred = (output >= 0.5).long()
        # compare predictions to the true labels
        correct_tensor = pred.eq(target.data.view_as(pred).long())
        correct = np.squeeze(correct_tensor.cpu().numpy())
        # tally correct predictions per class
        for i in range(target.size(0)):
            label = target.data[i].int()
            class_correct[label] += correct[i].item()
            class_total[label] += 1
# average test loss
test_loss = test_loss / len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(2):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
```
%% Output
Test Loss: 0.408014
Test Accuracy of noise: 94% (470/500)
Test Accuracy of wave: 75% (379/500)
Test Accuracy (Overall): 84% (849/1000)
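%% Cell type:markdown id: tags:
The comments in the loops above note that lowering the 0.5 decision threshold raises recall. A minimal sketch of that trade-off, reusing `model`, `test_loader`, and `DEVICE` from above (the sklearn metrics import is new here):
%% Cell type:code id: tags:
``` python
from sklearn.metrics import precision_score, recall_score

# collect predicted probabilities and true labels over the whole test set
probs, labels = [], []
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        probs.append(model(data.to(DEVICE)).squeeze(1).cpu())
        labels.append(target)
probs = torch.cat(probs).numpy()
labels = torch.cat(labels).numpy()

# sweep the threshold: lower values trade precision for recall on the 'wave' class
for thr in (0.5, 0.3, 0.1):
    pred = (probs >= thr).astype(int)
    print('thr=%.1f  precision=%.3f  recall=%.3f' % (
        thr, precision_score(labels, pred), recall_score(labels, pred)))
```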
%% Cell type:code id: tags:
``` python
```