def __init__(self):
     """Build the fixed-size Tk root window, train the network, and enter the GUI loop."""
     self.root = tkinter.Tk()
     self.root.geometry("500x420")  # fixed window size: width x height
     self.root.resizable(False, False)  # user cannot resize the window
     self.root.title('Kinship Verification')
     # NOTE(review): super().__init__() runs after Tk setup — confirm the base
     # class does not require initialisation before widgets are created.
     super().__init__()
     net.train_network()  # train (or load) the model before showing the UI
     self.first_frame()
     self.root.mainloop()  # blocks here until the window is closed
Example #2
0
def main():
    """Train the item/context network on the MATLAB-built dataset, then analyse it.

    Loads inputs/outputs, builds (identical) train and test datasets, trains the
    network, records the run id on ``args``, and analyses hidden activations.
    """
    args, device = get_config()

    # load in the inputs and outputs I built in matlab (before realising I really want to train this in pytorch)
    lookup, inputs, words = setup_inputs(args)
    attributes = setup_outputs(args, lookup)

    # define train and test sets using our Dataset-inherited class
    dataset = {
        'index': list(range(args.n_unique)),
        'input_item': inputs[0],
        'input_context': inputs[1],
        'label': attributes,
        'words': words,
        'domains': inputs[2]
    }
    trainset = net.CreateDataset(dataset)
    testset = net.CreateDataset(
        dataset
    )  # HRS note that, for now, train and test are same dataset. As in Rogers/McClelland

    # train and test network; name avoids shadowing the builtin id()
    model, run_id = net.train_network(args, device, trainset, testset)
    args.id = run_id
    # analyse trained network hidden activations
    analyse_network(args, trainset, testset, lookup)
def main():
    """Train the feature network, analyse hidden activations, and plot learning curves.

    Loads inputs/outputs, builds (identical) train and test datasets, trains the
    network, records the run id on ``args``, analyses hidden activations, and
    plots the training record saved at ``json_path``.
    """
    args, device = get_config()

    # load in the inputs and outputs I built in matlab (before realising I really want to train this in pytorch)
    lookup, inputs = setup_inputs(args)
    outputs = setup_outputs(args, lookup)

    # define train and test sets using our Dataset-inherited class
    dataset = {
        'index': list(range(args.n_unique)),
        'input_features': inputs,
        'label': outputs
    }
    trainset = net.CreateDataset(dataset)
    testset = net.CreateDataset(
        dataset
    )  # HRS note that, for now, train and test are same dataset. As in Rogers/McClelland

    # embed()
    # train and test network; name avoids shadowing the builtin id()
    model, run_id, json_path = net.train_network(args, device, trainset, testset)
    args.id = run_id
    # analyse trained network hidden activations
    analyse_network(args, trainset, testset, lookup)

    # plot training record and save it
    plot_learning_curve(args, json_path)
Example #4
0
    def initialize_network(self):
        """Create the input placeholder and build the flow-prediction graph."""
        self.batch_size = 1

        # 224x384 input with 8 channels, NHWC-style placeholder
        input_shape = (self.batch_size, 224, 384, 8)
        self.X = tf.placeholder(dtype=tf.float32, shape=input_shape)

        self.predict_flow5, self.predict_flow2 = network.train_network(self.X)
Example #5
0
	def initialize_network(self,lbl=False):
		"""Create input/label placeholders, build the flow network, and define the RMSE loss.

		Args:
			lbl: flag kept for interface compatibility (unused in this body).
		"""
		self.batch_size = 1

		# inputs are 160x256 with 8 channels; labels are 2-channel flow fields
		self.X = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, 160, 256, 8))
		self.Y = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, 160, 256, 2))

		self.predict_flow2 = network.train_network(self.X)

		# train_network returns a sequence; keep only the first prediction
		self.predict_flow2 = self.predict_flow2[0]

		# idiomatic truthiness test instead of `== True`
		if FLAGS.PARSING_MID:
			self.lossee = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.Y, self.predict_flow2))))
		else:
			self.lossee = None
Example #6
0
import network as Network

# NOTE(review): rebinding the module alias to an instance shadows the import;
# it works because the module is not referenced again, but it is confusing.
Network = Network.Network()
num_nodes = 100

# connection probabilities for the input and reservoir layers
input_probability = 0.3
reservoir_probability = 0.3
classifier = "log"

# reservoir hyper-parameters; train_data, len_segments_train, labels_train,
# num_groups, num_segments_* come from earlier in the file (not in this chunk)
Network.T = sum(len_segments_train)  
Network.n_min = 1
Network.K = 3
Network.N = num_nodes

Network.setup_network(train_data, num_nodes, input_probability, reservoir_probability, num_groups, num_segments_train)
Network.train_network(num_groups, classifier, num_segments_train, len_segments_train, labels_train, num_nodes)

# N x num_segments_test buffer for the test-set activations
Network.mean_test_matrix = np.zeros([Network.N, num_segments_test])
Network.test_network(test_data, num_segments_test, len_segments_test, num_nodes, num_groups, sum(len_segments_test))

if classifier == 'log':
    print(f'Performance using {classifier} : {Network.regressor.score(Network.mean_test_matrix.T,labels_test.T)}')
    prediction = Network.regressor.predict(Network.mean_test_matrix.T)


#%% Cell 10: Plot confusion matrix.
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2;
# ConfusionMatrixDisplay.from_estimator is the replacement — confirm the pinned version.
from sklearn.metrics import plot_confusion_matrix

fig, ax = plt.subplots(figsize = (12,9))
disp = plot_confusion_matrix(Network.regressor, Network.mean_test_matrix.T, labels_test.T, normalize='true', ax=ax)
disp.ax_.set_title("Confusion matrix")
Example #7
0
import network


def _run():
    """Kick off training with the default weights file."""
    network.train_network('weights.h5')


if __name__ == '__main__':
    _run()
Example #8
0
        [predict_flow_forward_ref1, predict_flow_backward_ref1]
    }


# batch of 1 with two samples on axis 1 (forward/backward), 224x384, 8 channels
X = tf.placeholder(dtype=tf.float32, shape=(1, 2, 224, 384, 8))
Y = tf.placeholder(dtype=tf.float32, shape=(1, 2, 224, 384, 3))

# split the pair axis into the forward and backward samples
X_forward, Y_forward = X[:, 0, :, :, :], Y[:, 0, :, :, :]
X_backward, Y_backward = X[:, 1, :, :, :], Y[:, 1, :, :, :]

X_forward, Y_forward = further_resize_imgs_lbls(X_forward, Y_forward)
X_backward, Y_backward = further_resize_imgs_lbls(X_backward, Y_backward)

# stack both directions along the batch axis so one network pass handles both
concatenated_FB_images = tf.concat([X_forward, X_backward], axis=0)

predict_flows = network.train_network(concatenated_FB_images)
flows_dict = get_predict_flow_forward_backward(predict_flows)

################ epe loss #######################

# RMSE between the forward label and the first forward flow prediction
total_loss = tf.sqrt(
    tf.reduce_mean(
        tf.square(tf.subtract(Y_forward, flows_dict['predict_flow'][0]))))

# sess.run(test_iterator.initializer)

# test_iterator is created elsewhere in the file — not visible in this chunk
test_image_batch, test_label_batch, filename1, filename2 = test_iterator.get_next(
)[0]

summaies = []  # NOTE(review): likely a typo for "summaries" — check later uses before renaming
########################
# Define the network parameters
########################

Network.T = d.training_data.shape[1]  #Number of training time steps
Network.n_min = 2540  #Number time steps dismissed
Network.K = 128  #Input layer size
Network.N = num_nodes  #Reservoir layer size

# driving input and teacher output sequences for training
Network.u = d.training_data
Network.y_teach = d.training_results

Network.setup_network(d, num_nodes, input_probability, reservoir_probability,
                      d.data.shape[-1])

Network.train_network(d.data.shape[-1], classifier, d.num_columns,
                      d.num_trials_train, d.train_labels, Network.N)

# N x trials x columns buffer for the test-set activations
Network.mean_test_matrix = np.zeros(
    [Network.N, d.num_trials_test, d.data.shape[-1]])

Network.test_network(d.test_data,
                     d.num_columns,
                     d.num_trials_test,
                     Network.N,
                     d.data.shape[-1],
                     t_autonom=d.test_data.shape[1])

if classifier == 'lin':
    print(
        f'Performance for {test_name} using {classifier} : {d.accuracy_lin(Network.regressor.predict(Network.mean_test_matrix.T),d.test_labels)}'
    )
Example #10
0
                default="vgg13",
                type=str)
# CLI option: size of the first hidden layer (parser `ap` is built above this chunk)
ap.add_argument('--hidden_units',
                type=int,
                dest="hidden_units",
                action="store",
                default=120)

pa = ap.parse_args()
where = pa.data_dir
path = pa.save_dir
lr = pa.learning_rate
structure = pa.arch
dropout = pa.dropout
hidden_layer1 = pa.hidden_units
power = pa.gpu  # presumably a gpu/cpu device flag — confirm against network.nn_network
epochs = pa.epochs

trainloader, v_loader, testloader, train_data = network.load_data(where)

model, optimizer, criterion = network.nn_network(structure, dropout,
                                                 hidden_layer1, lr, power)

# NOTE(review): the literal 20 is presumably a print/validation interval —
# confirm in network.train_network
network.train_network(model, optimizer, criterion, trainloader, v_loader,
                      epochs, 20, power)

network.save_checkpoint(model, train_data, path, structure, hidden_layer1,
                        dropout, lr)

print("All Set and Done. The Model is trained")
Example #11
0
# validate the data directory
if not os.path.isdir(args.data_dir):
    print(args.data_dir, 'is not a valid data directory.')
    exit()

# create data loader
# NOTE(review): this rebinds the `data` module name to its return value,
# so the module cannot be used again below — confusing but intentional here.
data = data.create_data_loaders(args.data_dir)

# create the network
# default classifier layer sizes; overridden by the CLI value when supplied
hidden_units = [4000, 1000, 200]
if len(args.hidden_units) > 0:
    # only the first element is used — confirm this matches the argparse nargs setup
    hidden_units = args.hidden_units[0]

model = network.create_network(args.arch, hidden_units)

# train the network
device = 'cuda' if args.gpu else 'cpu'
network.train_network(model,
                      data['train'],
                      data['valid'],
                      args.epochs,
                      device,
                      fake=args.fake)

# save the checkpoint
# timestamped name keeps successive runs from overwriting each other
checkpoint_filename = 'checkpoint_' + args.arch + '_' + time.strftime(
    "%Y-%m-%dT%H-%M-%S")
checkpoint_path = args.save_dir + '/' + checkpoint_filename
network.save_checkpoint(model, checkpoint_path)
print("Network written to", checkpoint_path)
Example #12
0
    warped_img = sess.run(warped_img)
    warped_img = np.squeeze(warped_img)

    # Image.fromarray(np.uint8(img2_orig)).show()
    Image.fromarray(np.uint8(warped_img)).show()
    print(loss)


def load_model_ckpt(sess, filename):
    """Restore the latest checkpoint found under *filename* into *sess*."""
    tf.train.Saver().restore(sess, tf.train.latest_checkpoint(filename))


# NOTE(review): (384, 224) vs the (224, 384) placeholder below — presumably
# (width, height) here vs (height, width) in the tensor shape; confirm.
input_size = (384, 224)
sess = tf.InteractiveSession()
# one 224x384 sample: 8-channel input, 2-channel flow label
X = tf.placeholder(dtype=tf.float32, shape=(1, 224, 384, 8))
Y = tf.placeholder(dtype=tf.float32, shape=(1, 224, 384, 2))

predict_flows = network.train_network(X)

# keep only the first returned prediction
predict_flow2 = predict_flows[0]

# Y = further_resize_lbls(Y)

# keep only the first two channels — presumably the flow components; confirm
predict_flow2 = predict_flow2[:, :, :, 0:2]
loss_result = lhpl.endpoint_loss(Y, predict_flow2, 1)
# loss_result = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(Y, predict_flow2))))

load_model_ckpt(sess, FLAGS.CKPT_FOLDER)

perform_testing()
Example #13
0
from keras.layers.normalization import BatchNormalization

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin this process to the first GPU

if __name__ == "__main__":
    time1 = time.time()
    data_path = "dataset/train_sales_data.csv"
    ####################  ######################
    mode = "train"  # train test docker
    print("#################work mode", mode, "#######################")
    if mode == "train":
        # Data preprocessing
        #(trainX, trainY) = proprocess.generate_train_seq(train_images_folder_path, train_track_folder_path)
        # NOTE(review): DataSets is bound as a class, yet load_passenger_car is
        # called on it directly — confirm it is a classmethod/staticmethod.
        load_data = proprocess.DataSets
        trainX, trainY, validX, validY = load_data.load_passenger_car(
            data_path)
        model = network.build_network()
        history = network.train_network(trainX,
                                        trainY,
                                        validX,
                                        validY,
                                        model,
                                        epochs=1000)
        network.plt_result(history, "output", "history.png")
    elif mode == "test":
        network.helloworld()
    else:
        print("mode error!")
    time2 = time.time()
    print('time use:' + str(time2 - time1) + 's')
Example #14
0
#create model using --arch argument with vgg19 as default
model = utility.torch_model(args.arch)
for param in model.parameters():
    param.requires_grad = False
#params are now frozen so that we do not backprop thru them again

#calculate input size into the network classifier
input_size = utility.get_input_size(model, args.arch)

# replace the pretrained classifier with our own head (single hidden layer)
model.classifier = network.Network(input_size,
                                   args.output_size, [args.hidden_units],
                                   drop_p=0.35)

#define the loss function and the optimization parameters
criterion = nn.NLLLoss(
)  #want nllloss because we do the logsoftmax as our output activation
# only the new classifier's parameters are optimised; the backbone stays frozen
optimizer = optim.SGD(model.classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.1)

#train model
# NOTE(review): the literal 32 is presumably a print/validation interval —
# confirm in network.train_network
network.train_network(model, trainloader, validloader, args.epochs, 32,
                      criterion, optimizer, scheduler, args.gpu)

#test model
test_accuracy, test_loss = network.check_accuracy_loss(model, testloader,
                                                       criterion, args.gpu)
print("\n ---\n Test Accuracy: {:.2f} %".format(test_accuracy * 100),
      "Test Loss: {}".format(test_loss))

#save network to checkpoint
utility.save_checkpoint(model, train_data, optimizer, args.save_dir, args.arch)
file_ext = ".npy"  # numpy bitmap arrays
classes = ["circle", "square", "hexagon"]  # classes to train network on

# load data (data_dir is defined earlier in the file — not visible in this chunk)
x, y = ld.load(classes=classes,
               data_dir=data_dir,
               file_ext=file_ext,
               samples=10,
               reload_data=False)
# ratio=0.2 — presumably the held-out test fraction; confirm in ld.divide_into_sets
x_train, y_train, x_test, y_test = ld.divide_into_sets(input=x,
                                                       response=y,
                                                       ratio=0.2)

# train network
yolo = net.build_network(y=y_train,
                         batch=6,
                         version=1,
                         input_size=(448, 448, 3),
                         output_size=392)
yolo = net.train_network(network=yolo,
                         x=x_train,
                         y=y_train,
                         batch=6,
                         iterations=5)
performance_metrics = net.test_network(network=yolo, x=x_test, y=y_test)
print(performance_metrics)

# test to see if the network can correctly generate the output for an example
res = yolo.predict(x_test[5].reshape(1, 448, 448, 3))
print(res)