Example #1
0
def main():
    """Train a TCNN crack classifier on the GAPs dataset.

    Loads 20 training chunks from disk, trains for 40 epochs with SGD on
    a two-class BCEWithLogitsLoss, prints a rough accuracy over the first
    32 training samples after each epoch, and checkpoints the weights.
    """
    # --- load the first 20 training chunks into memory --------------------
    train_x = []
    train_y = []
    for chunk_id in range(20):
        x_chunk, y_chunk = gaps.load_chunk(chunk_id,
                                           subset='train',
                                           datadir='/home/turing/temp')
        for sample in range(x_chunk.shape[0]):
            train_x.append(x_chunk[sample])
            train_y.append(y_chunk[sample])
    train_x = np.array(train_x)
    train_y = np.array(train_y)
    print("loaded_training data")

    # --- model / optimizer ------------------------------------------------
    model = TCNN()
    model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=0.00001, momentum=0.9)
    criterion = nn.BCEWithLogitsLoss()

    for epoch in range(40):  # loop over the dataset multiple times
        # NOTE(review): the window start advances by 64 but each batch
        # gathers 128 samples, so consecutive batches overlap by 50% —
        # confirm this is intended rather than a batch-size typo.
        for i in range(0, train_x.shape[0], 64):
            inputs = []
            labels = []
            for j in range(i, i + 128):
                if j < train_x.shape[0]:
                    inputs.append(train_x[j])
                    # One-hot encode the label: [is_class_0, is_class_1].
                    labels.append([int(train_y[j] == 0), int(train_y[j] == 1)])
            # Partial windows at the end of the dataset are skipped.
            if len(labels) == 128:
                inputs = np.asarray(inputs, dtype=np.float32)
                labels = np.asarray(labels, dtype=np.float32)
                inputs, labels = Variable(
                    torch.from_numpy(inputs).cuda()), Variable(
                        torch.from_numpy(labels).cuda())
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                print("Current loss :")
                # Bug fix: `loss.data[0]` raises on PyTorch >= 0.5;
                # `.item()` returns the scalar loss on 0.4+ as well.
                print(loss.item())
                loss.backward()
                optimizer.step()

        # --- quick sanity check on the first 32 training samples ----------
        true = 0
        inputs = []
        for k in range(0, 32):
            inputs.append(train_x[k])
        inputs = np.asarray(inputs, dtype=np.float32)
        inputs = Variable(torch.from_numpy(inputs).cuda())
        outputs = model(inputs)
        outputs = outputs.max(dim=1)[1]  # predicted class index per sample
        results = outputs.data.cpu().numpy()
        # Bug fix: the original compared results[k] == train_y[k] using the
        # stale loop variable k (always 31), so it counted the same sample
        # 32 times; index each evaluated sample instead.
        for i in range(32):
            if results[i] == train_y[i]:
                true = true + 1
        print("Correct outputs")
        print(true)
        print("epoch complete")
        torch.save(model.state_dict(), "/home/turing/temp/alex.pt")
Example #2
0
def load_gaps():
    """Load GAPs chunk 0 and shift negative-valued images to be non-negative.

    Returns:
        (x, y): images reshaped to (N, 64, 64, 1) and their labels.
    """
    x, y = gaps.load_chunk(0, datadir='./', debug_outputs=True)
    x = np.reshape(x, (x.shape[0], 64, 64, 1))
    # Idiom fix: enumerate replaces zip(x, range(32000)), which silently
    # capped the normalization at 32000 images; also hoist np.min so it is
    # computed once per image instead of twice.
    for idx, im in enumerate(x):
        lo = np.min(im)
        if lo < 0:
            x[idx] = im + np.abs(lo)
    return x, y
Example #3
0
def load_gaps_crack_images(chunk_list):
    """Load the crack (label == 1) images from the GAPs dataset.

    @param chunk_list: iterable of chunk ids to load
    @return: array of crack images shaped (N, 64, 64, 1), or None when
             chunk_list is empty
    """
    crack_chunks = []
    for chunk in chunk_list:
        x, y = gaps.load_chunk(chunk, datadir='.', debug_outputs=True)
        # Keep only the positively-labelled (crack) patches.
        crack_x = x[np.where(y == 1)]
        crack_x = np.reshape(crack_x, (crack_x.shape[0], 64, 64, 1))
        # Shift any image containing negative values so its minimum is >= 0.
        for idx, im in enumerate(crack_x):
            lo = np.min(im)
            if lo < 0:
                crack_x[idx] = im + np.abs(lo)
        crack_chunks.append(crack_x)
    # Perf/idiom fix: one vstack over all chunks replaces the O(n^2)
    # incremental vstack, and the `type(ret) == type(None)` check is gone —
    # an empty chunk_list still yields None for backward compatibility.
    return np.vstack(crack_chunks) if crack_chunks else None
Example #4
0
# Dataset folder path
dataset_dir = '../Dataset'
# Load the training-dataset info file (gives the number of chunks below).
train_info = gaps.get_dataset_info(version=2,
                                   patchsize=160,
                                   issue='NORMvsDISTRESS_50k',
                                   subset='train',
                                   datadir='../Dataset')

# Load all chunks of the training dataset.
x_train = []  # per-chunk image arrays
y_train = []  # per-chunk label arrays

for chunk_id in range(train_info['n_chunks']):
    # Bug fix: subset was 'test' here, so the loop filled the training
    # arrays with test chunks despite train_info coming from subset='train'.
    x, y = gaps.load_chunk(chunk_id=chunk_id,
                           version=2,
                           patchsize=160,
                           issue='NORMvsDISTRESS_50k',
                           subset='train',
                           datadir='../Dataset')
    x_train.append(x)
    y_train.append(y)
# Convert to np arrays; shape is (n_chunks, chunk_size, ...).
x_train = np.array(x_train)
y_train = np.array(y_train)
# Bug fix: the original printed the first chunk's contents, not a shape.
print("Shape of x_train:" + str(x_train.shape))
#print("Shape of y_train:" + str(y_train.shape))