Example #1
def train(save_path='', load_path=None):
    #    db_path = '/home/yash/Project/dataset/GraphSimilarity/reddit_multi_5K.graph'
    #    db = Data(db_path)
    # NOTE: db is assumed to be a module-level Data instance created elsewhere
    class_count = db.classes
    val_size = db.val_size
    inp_size = db.size

    if load_path:
        net = load(load_path)
    else:
        net = [g_cnn(prv_count=val_size, filter_count=4), g_pool(),
               g_cnn(prv_count=4, filter_count=8), g_pool(flat=True),
               fc_nn(prv=((inp_size // 2) // 2) * 8, nodes=1024),
               fc_nn(prv=1024, nodes=256, dropout=True),
               fc_nn(prv=256, nodes=class_count, fn="Softmax")]

    epoch = 25
    checkpoint = 10
    batch_size = 1
    train_error = np.zeros(epoch * 5000 // batch_size, np.float64)  # np.float is removed in NumPy >= 1.24
    valid_error = np.zeros(epoch)
    ctr = 0
    for i in range(epoch):
        while db.has_more:

            data_batch = db.next_batch()
            for d in data_batch:
                e = train_step(net, d)
                #print(e)
                train_error[ctr] += np.sum(np.abs(e))

            #train_error[ctr] /= batch_size
            update(net, batch_size)

            ctr += 1
            if ctr % checkpoint == 0:
                # ctr was just incremented, so the batch accumulated above is ctr - 1
                print("Batch [%d]: Training Error: [%f]" %
                      (ctr, train_error[ctr - 1]))

        db.has_more = True  # reset the dataset iterator before validation and the next epoch
        data_batch = db.get_test()
        for d in data_batch:
            pred = fwd_pass(net, d[0])
            valid_error[i] += -np.sum(calc_error(pred, d[1]))
        valid_error[i] /= len(data_batch)

        save(net, save_path)
        print("Epoch [%d]> Validation Error: [%f]" % (i, valid_error[i]))
Example #2
                                   fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1. / 255)

print "loading original inception model"

model = net.build_model(nb_classes)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=["accuracy"])

# train the model on the new data for a few epochs

print "training the newly added dense layers"

net.save(model, model_file_prefix)

# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True

# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=["accuracy"])

# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers)
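The snippet is cut off before that second training call. A minimal sketch of what the fine-tuning step typically looks like with this API; train_generator, validation_generator, and fine_tune_epochs are assumptions, not this project's code:

# hypothetical fine-tuning call (generator names and epoch count assumed)
fine_tune_epochs = 10
model.fit_generator(train_generator,
                    steps_per_epoch=len(train_generator),
                    epochs=fine_tune_epochs,
                    validation_data=validation_generator,
                    validation_steps=len(validation_generator))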
Example #3
    grid=True,
)

# %%
# show how many epochs there were in the end
TRAIN_FINAL.describe()

# %% [markdown]
#  ## Analyzing the Final Neural Network

# %%
FINAL_NET = TRAIN_FINAL.iloc[-1].net

# %%
# store final network to cache
nn.save(FINAL_NET, os.path.join(DATA_PATH, 'final-net.pkl'))

# %%
# load final network from cache
FINAL_NET = nn.load(os.path.join(DATA_PATH, 'final-net.pkl'))

# %%
# show the average error per digit set

net = FINAL_NET
result = {
    'kind': [],
    'error': [],
}

for kind in dg.ALL_KINDS:
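    # Hypothetical completion -- the snippet is truncated here; dg.make_set and
    # nn.error are assumed helper names, not this project's actual API.
    inputs, labels = dg.make_set(kind)
    result['kind'].append(kind)
    result['error'].append(nn.error(net, inputs, labels))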
Example #4
#1# Original InceptionV3 model loading
model = net.build_model(CLASSES_COUNT)
model.compile(
    optimizer='rmsprop',
    loss='categorical_crossentropy',
    metrics=[metrics.categorical_accuracy, metrics.top_k_categorical_accuracy])
logger.log("The Original InceptionV3 model has been loaded", 0)

#2# Train the model on the new data for a few epochs and save
logger.log("Model first train, evaluation and save", 0)
first_train_start = time.time()

filepath = MODEL_FILE_FULL_PATH + "_0.h5"
train_model(model, filepath)

net.save(model, tags, MODEL_FILE_FULL_PATH + "_0")
logger.execution_time(first_train_start,
                      "Model first train, evaluation and save", 0)

#3# Fine-tuning convolutional layers from Inception V3
logger.log("Fine-tuning convolutional layers from Inception V3", 0)
finetuning_start = time.time()

## We freeze the first 172 layers and unfreeze the top 2 inception blocks
for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True
logger.log(
    "The first 172 layers are now frozen and the top 2 inception blocks are trainable",
    1)
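The snippet stops here, but as the other InceptionV3 examples in this collection show, the changed trainable flags only take effect after recompiling, typically with a low-learning-rate SGD. A sketch consistent with the compile call in step #1# above (the import and hyperparameters are assumptions, not this project's actual values):

from keras.optimizers import SGD

## Recompile so the new trainable flags take effect
model.compile(
    optimizer=SGD(lr=0.0001, momentum=0.9),
    loss='categorical_crossentropy',
    metrics=[metrics.categorical_accuracy, metrics.top_k_categorical_accuracy])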
Example #5
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 01:54:02 2021
@author: fatemeh tahrirchi
"""
import datasets, net
from preprocessing import Preprocessing, CharVectorizer
from net import VDCNN, train, save
import lmdb
import numpy as np
from tqdm import tqdm
import argparse
import torch
from torch.utils.data import DataLoader, Dataset
import os, subprocess
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
MODELS_FOLDER = 'models/vdcnn'
DATA_FOLDER = 'datasets'
DATASET = 'yelp_review_full'  #['yelp_review_full','yelp_review_polarity']
PREPROCES_TYPE = 'lower'  #['lower','denoiser','add_pos','add_hashtag','add_NOT']

# get device to calculate on (either CPU or GPU with minimum memory load)
def get_gpu_memory_map():
    result = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'],
        encoding='utf-8')
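    # Plausible completion (the snippet is truncated at this point): parse the
    # one-number-per-line CSV output into {gpu_index: MiB used}.
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    return dict(zip(range(len(gpu_memory)), gpu_memory))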
Example #6
                # print('state size',len(states))
            # Swap turns and increment move count
            moves += 1
            g.turn = g.get_opponent(g.turn)
            reward = 0
            if g.game_over():
                print("Game over in {} moves".format(moves))
                print("Num states: ", len(states))
                print("{} won".format(g.find_winner()))

                if g.find_winner() == "white":
                    reward = 1
                    wins += 1
                for i in range(len(g.board)):
                    g.print_point(i)

    # Build the eligibility trace with the list of states white has accumulated
    for i in range(0, len(states) - 2):
        # print("State:", i)

        # Feed in current state and the next state
        # the eligibility is based on states t and t+1
        current_state = states[i]
        predicted_state = states[i + 1]

        error = net.getValue(predicted_state)[0] - net.getValue(current_state)[0]
        net.feedforward(current_state)
        net.do_td(current_state, net.getValue(current_state), error)
    print("Win percentage: {}".format(wins / count))
net.save()
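For reference, the error computed in the loop above is the TD(0) temporal-difference term, delta_t = V(s_{t+1}) - V(s_t), with the terminal reward folded into the state values rather than appearing explicitly. A minimal tabular sketch of the general update rule (illustrative names, not this project's net API):

# minimal tabular TD(0) sketch: V[s] += alpha * (reward + gamma * V[s'] - V[s])
def td0_update(V, s, s_next, reward, alpha=0.1, gamma=1.0):
    delta = reward + gamma * V.get(s_next, 0.0) - V.get(s, 0.0)
    V[s] = V.get(s, 0.0) + alpha * delta
    return delta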
Example #7
# train the model on the new data for a few epochs

print "training the newly added dense layers, train=%d test=%d batch/%d" % (X_train.shape[0], X_test.shape[0], batch_size)

model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size, shuffle=True),
                    steps_per_epoch=X_train.shape[0] // batch_size,  # Keras expects an integer step count
                    epochs=epoch,
                    validation_data=datagen.flow(X_test, Y_test, batch_size=batch_size),
                    validation_steps=X_test.shape[0] // batch_size,
                    callbacks=[TensorBoard(log_dir='./tb_logs')],
                    )

evaluate(model, "000.png")

net.save(model, tags, model_file_prefix)

# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.

# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True

# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
# model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=["accuracy"])
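One caveat when running this against TensorFlow 2-era Keras: the lr argument to SGD is deprecated in favor of learning_rate, so the commented-out compile would become:

from tensorflow.keras.optimizers import SGD

model.compile(optimizer=SGD(learning_rate=0.0001, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])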