Commit 0ee28ff1 authored by Yifan Wang's avatar Yifan Wang
Browse files

Add hdf5_to_csv conversion and a first attempt at training on the data

parent f9952731
%% Cell type:code id: tags:
``` python
import csv
import numpy as np
import pandas as pd
from tqdm import tqdm
import utils.samplefiles
```
%% Cell type:code id: tags:
``` python
# Dataset split sizes: number of injection ("w") and pure-noise ("n")
# examples to keep for the training and testing sets.
train_wnum, train_nnum = 50, 50
test_wnum, test_nnum = 50, 50
```
%% Cell type:code id: tags:
``` python
# Load the generated samples via the project-local SampleFile helper.
# NOTE(review): assumes ./output/train.hdf was produced by the sample
# generation step of this project — confirm against utils.samplefiles.
data = utils.samplefiles.SampleFile()
data.read_hdf('./output/train.hdf')
```
%% Cell type:code id: tags:
``` python
# Split the samples into two DataFrames: injections ("wave") and pure
# noise ("noise"); injection parameters are kept as extra columns.
# NOTE(review): keyword semantics are inferred from their names —
# verify against utils.samplefiles.SampleFile.as_dataframe.
wave, noise = data.as_dataframe(injection_parameters=True,
static_arguments=False,
command_line_arguments=False,
split_injections_noise=True)
```
%% Cell type:markdown id: tags:
Turn strain into multi-dimension array
%% Cell type:code id: tags:
``` python
# Stack each sample's H1 strain series into a 2-D array: one row per
# sample, one column per time-domain point.
wary = np.array(wave['h1_strain'].tolist())
nary = np.array(noise['h1_strain'].tolist())
```
%% Cell type:markdown id: tags:
Split train and test set
%% Cell type:code id: tags:
``` python
# First train_*num rows go to training; everything after is held out
# for testing.
wtrain, wtest = wary[:train_wnum, :], wary[train_wnum:, :]
ntrain, ntest = nary[:train_nnum, :], nary[train_nnum:, :]
```
%% Cell type:markdown id: tags:
Insert label
%% Cell type:code id: tags:
``` python
# Prepend the class label as column 0: 1 = injection (signal),
# 0 = pure noise.
wtrain, wtest = (np.insert(a, 0, values=1, axis=1) for a in (wtrain, wtest))
ntrain, ntest = (np.insert(a, 0, values=0, axis=1) for a in (ntrain, ntest))
```
%% Cell type:markdown id: tags:
Training set name
%% Cell type:code id: tags:
``` python
# Header for the training CSV: a label column followed by one column
# per strain sample point.
# NOTE(review): num is hard-coded to 50 — it must equal the number of
# strain points per row written below; confirm against the strain length.
num = 50
train_name = ['label']
for idx in tqdm(range(num)):
    train_name.append('point{s1}'.format(s1=idx))
```
%% Cell type:code id: tags:
``` python
# Write the training CSV: the header row, then one injection row and one
# noise row per iteration (the label is already in column 0 of each row).
# Fix: open the file with newline='' as required by the csv module;
# without it every record is followed by a blank line on Windows.
with open("output/train.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    # column names
    writer.writerow(train_name)
    # interleave signal and noise rows; assumes wtrain and ntrain each
    # have at least train_wnum rows — TODO confirm counts match
    for i in tqdm(range(0, train_wnum)):
        writer.writerow(wtrain[i])
        writer.writerow(ntrain[i])
```
%% Cell type:markdown id: tags:
testing set name
%% Cell type:code id: tags:
``` python
# Header for the testing CSV: a label column followed by one column per
# strain sample point.
# NOTE(review): num is hard-coded to 50 — it must equal the number of
# strain points per row written below; confirm against the strain length.
num = 50
test_name = ['label']
for idx in tqdm(range(num)):
    test_name.append('point{s1}'.format(s1=idx))
```
%% Cell type:code id: tags:
``` python
# Write the testing CSV: the header row, then one injection row and one
# noise row per iteration (the label is already in column 0 of each row).
# Fix: open the file with newline='' as required by the csv module;
# without it every record is followed by a blank line on Windows.
with open("output/test.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    # column names
    writer.writerow(test_name)
    # interleave signal and noise rows; assumes wtest and ntest each
    # have at least test_wnum rows — TODO confirm counts match
    for i in tqdm(range(0, test_wnum)):
        writer.writerow(wtest[i])
        writer.writerow(ntest[i])
```
%% Cell type:code id: tags:
``` python
```
This diff is collapsed.
from .cnn import *
from .cnn_1x1 import *
from .cnn_bn import *
from .cnn_dp import *
from .cnn_dp_bn import *
from .cnn_dp_bn_1x1 import *
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvNet1(nn.Module):
    """Baseline 1-D CNN: three conv/ReLU/max-pool stages feeding a
    two-layer classifier head with a sigmoid output.

    Input:  (batch, 1, length) strain tensor. The flattened feature
            size is hard-wired to 3904 = 64 channels x 61 steps (e.g.
            an input length of 4096 — confirm against the data).
    Output: (batch, 1) score in (0, 1).
    """

    def __init__(self):
        super(ConvNet1, self).__init__()
        # Feature extractor: channels 1 -> 16 -> 32 -> 64; each conv is
        # followed in forward() by ReLU and a stride-4 max pool.
        self.conv1 = nn.Conv1d(1, 16, 16)
        self.max_pool1 = nn.MaxPool1d(4,4)
        self.conv2 = nn.Conv1d(16, 32, 8)
        self.max_pool2 = nn.MaxPool1d(4,4)
        self.conv3 = nn.Conv1d(32, 64, 8)
        self.max_pool3 = nn.MaxPool1d(4,4)
        # Classifier head.
        self.fc1 = nn.Linear(3904, 64)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, x):
        """Apply the conv stages, flatten, and classify."""
        stages = ((self.conv1, self.max_pool1),
                  (self.conv2, self.max_pool2),
                  (self.conv3, self.max_pool3))
        for conv, pool in stages:
            x = pool(F.relu(conv(x)))
        flat = x.view(x.size(0), -1)            # (batch, 3904)
        hidden = F.relu(self.fc1(flat))
        return torch.sigmoid(self.fc2(hidden))
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvNet4(nn.Module):
    """1-D CNN with a pointwise (kernel-size-1) channel-expansion conv
    in front of the three-stage conv/pool stack of the baseline.

    Input:  (batch, 1, length) strain tensor; flattened feature size is
            hard-wired to 3904 (e.g. input length 4096 — confirm).
    Output: (batch, 1) score in (0, 1).
    """

    def __init__(self):
        super(ConvNet4, self).__init__()
        self.conv1 = nn.Conv1d(1, 8, 1)         # 1x1 conv: 1 -> 8 channels
        self.conv2 = nn.Conv1d(8, 16, 16)
        self.max_pool1 = nn.MaxPool1d(4,4)
        self.conv3 = nn.Conv1d(16, 32, 8)
        self.max_pool2 = nn.MaxPool1d(4,4)
        self.conv4 = nn.Conv1d(32, 64, 8)
        self.max_pool3 = nn.MaxPool1d(4,4)
        # Classifier head.
        self.fc1 = nn.Linear(3904, 64)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, x):
        """Pointwise conv, then three conv/pool stages, then classify."""
        x = F.relu(self.conv1(x))               # no pooling after 1x1 conv
        for conv, pool in ((self.conv2, self.max_pool1),
                           (self.conv3, self.max_pool2),
                           (self.conv4, self.max_pool3)):
            x = pool(F.relu(conv(x)))
        flat = x.view(x.size(0), -1)            # (batch, 3904)
        hidden = F.relu(self.fc1(flat))
        return torch.sigmoid(self.fc2(hidden))
import torch
import torch.nn as nn
import torch.nn.functional as F
# Define the network
class ConvNet3(nn.Module):
    """Batch-normalized variant of the baseline 1-D CNN: each of the
    three conv stages is conv -> BatchNorm -> ReLU -> max pool.

    Input:  (batch, 1, length) strain tensor; flattened feature size is
            hard-wired to 3904 (e.g. input length 4096 — confirm).
    Output: (batch, 1) score in (0, 1).
    """

    def __init__(self):
        super(ConvNet3, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, 16)
        self.bn1 = nn.BatchNorm1d(16)
        self.max_pool1 = nn.MaxPool1d(4,4)
        self.conv2 = nn.Conv1d(16, 32, 8)
        self.bn2 = nn.BatchNorm1d(32)
        self.max_pool2 = nn.MaxPool1d(4,4)
        self.conv3 = nn.Conv1d(32, 64, 8)
        self.bn3 = nn.BatchNorm1d(64)
        self.max_pool3 = nn.MaxPool1d(4,4)
        # Classifier head.
        self.fc1 = nn.Linear(3904, 64)
        self.fc2 = nn.Linear(64, 1)

    def forward(self, x):
        """Apply the conv/BN stages, flatten, and classify."""
        for conv, bn, pool in ((self.conv1, self.bn1, self.max_pool1),
                               (self.conv2, self.bn2, self.max_pool2),
                               (self.conv3, self.bn3, self.max_pool3)):
            x = pool(F.relu(bn(conv(x))))
        flat = x.view(x.size(0), -1)            # (batch, 3904)
        hidden = F.relu(self.fc1(flat))
        return torch.sigmoid(self.fc2(hidden))
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvNet2(nn.Module):
    """Dropout variant of the baseline 1-D CNN: identical conv stack,
    with p=0.5 dropout applied after the first fully connected layer.

    Input:  (batch, 1, length) strain tensor; flattened feature size is
            hard-wired to 3904 (e.g. input length 4096 — confirm).
    Output: (batch, 1) score in (0, 1).
    """

    def __init__(self):
        super(ConvNet2, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, 16)
        self.max_pool1 = nn.MaxPool1d(4,4)
        self.conv2 = nn.Conv1d(16, 32, 8)
        self.max_pool2 = nn.MaxPool1d(4,4)
        self.conv3 = nn.Conv1d(32, 64, 8)
        self.max_pool3 = nn.MaxPool1d(4,4)
        # Classifier head with dropout regularization.
        self.fc1 = nn.Linear(3904, 64)
        self.fc2 = nn.Linear(64, 1)
        self.dropout = nn.Dropout(p=.5)

    def forward(self, x):
        """Apply the conv stages, flatten, and classify with dropout."""
        for conv, pool in ((self.conv1, self.max_pool1),
                           (self.conv2, self.max_pool2),
                           (self.conv3, self.max_pool3)):
            x = pool(F.relu(conv(x)))
        flat = x.view(x.size(0), -1)            # (batch, 3904)
        hidden = self.dropout(F.relu(self.fc1(flat)))
        return torch.sigmoid(self.fc2(hidden))
import torch
import torch.nn as nn
import torch.nn.functional as F
# Define the network
class ConvNet5(nn.Module):
    """BatchNorm + dropout variant of the baseline 1-D CNN: each conv
    stage is conv -> BatchNorm -> ReLU -> max pool, and p=0.5 dropout is
    applied after the first fully connected layer.

    Input:  (batch, 1, length) strain tensor; flattened feature size is
            hard-wired to 3904 (e.g. input length 4096 — confirm).
    Output: (batch, 1) score in (0, 1).
    """

    def __init__(self):
        super(ConvNet5, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, 16)
        self.bn1 = nn.BatchNorm1d(16)
        self.max_pool1 = nn.MaxPool1d(4,4)
        self.conv2 = nn.Conv1d(16, 32, 8)
        self.bn2 = nn.BatchNorm1d(32)
        self.max_pool2 = nn.MaxPool1d(4,4)
        self.conv3 = nn.Conv1d(32, 64, 8)
        self.bn3 = nn.BatchNorm1d(64)
        self.max_pool3 = nn.MaxPool1d(4,4)
        # Classifier head with dropout regularization.
        self.fc1 = nn.Linear(3904, 64)
        self.fc2 = nn.Linear(64, 1)
        self.dropout = nn.Dropout(p=.5)

    def forward(self, x):
        """Apply the conv/BN stages, flatten, and classify with dropout."""
        for conv, bn, pool in ((self.conv1, self.bn1, self.max_pool1),
                               (self.conv2, self.bn2, self.max_pool2),
                               (self.conv3, self.bn3, self.max_pool3)):
            x = pool(F.relu(bn(conv(x))))
        flat = x.view(x.size(0), -1)            # (batch, 3904)
        hidden = self.dropout(F.relu(self.fc1(flat)))
        return torch.sigmoid(self.fc2(hidden))
import torch
import torch.nn as nn
import torch.nn.functional as F
# Define the network
class ConvNet6(nn.Module):
    """Largest variant: pointwise (kernel-size-1) channel expansion,
    BatchNorm after every conv, and p=0.5 dropout after the first fully
    connected layer.

    Input:  (batch, 1, length) strain tensor; flattened feature size is
            hard-wired to 3904 (e.g. input length 4096 — confirm).
    Output: (batch, 1) score in (0, 1).
    """

    def __init__(self):
        super(ConvNet6, self).__init__()
        self.conv1 = nn.Conv1d(1, 8, 1)         # 1x1 conv: 1 -> 8 channels
        self.bn1 = nn.BatchNorm1d(8)
        self.conv2 = nn.Conv1d(8, 16, 16)
        self.bn2 = nn.BatchNorm1d(16)
        self.max_pool1 = nn.MaxPool1d(4,4)
        self.conv3 = nn.Conv1d(16, 32, 8)
        self.bn3 = nn.BatchNorm1d(32)
        self.max_pool2 = nn.MaxPool1d(4,4)
        self.conv4 = nn.Conv1d(32, 64, 8)
        self.bn4 = nn.BatchNorm1d(64)
        self.max_pool3 = nn.MaxPool1d(4,4)
        # Classifier head with dropout regularization.
        self.fc1 = nn.Linear(3904, 64)
        self.fc2 = nn.Linear(64, 1)
        self.dropout = nn.Dropout(p=.5)

    def forward(self, x):
        """Pointwise conv/BN, three conv/BN/pool stages, then classify."""
        x = F.relu(self.bn1(self.conv1(x)))     # no pooling after 1x1 conv
        for conv, bn, pool in ((self.conv2, self.bn2, self.max_pool1),
                               (self.conv3, self.bn3, self.max_pool2),
                               (self.conv4, self.bn4, self.max_pool3)):
            x = pool(F.relu(bn(conv(x))))
        flat = x.view(x.size(0), -1)            # (batch, 3904)
        hidden = self.dropout(F.relu(self.fc1(flat)))
        return torch.sigmoid(self.fc2(hidden))
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment