Example 1
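Loads a trained ConvNet checkpoint, runs it over a test Batcher, and writes the predictions to a Kaggle-style id,label CSV.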
import argparse

import torch as t

# Project-local modules assumed by this snippet:
import batcher
import parameters
import model as mdl


def main():
    parser = argparse.ArgumentParser(description='IFT6135')
    parser.add_argument('--batch-size', type=int, default=32)
    # argparse's type=bool treats any non-empty string (even "False") as
    # True; a store_true flag is the idiomatic boolean switch.
    parser.add_argument('--use-cuda', action='store_true')
    parser.add_argument('--model', default=None)

    args = parser.parse_args()
    params = parameters.params

    test_batch_loader = batcher.Batcher("./Data/test/", shuffle=False, test=True)

    model = mdl.ConvNet(params)
    model.load_state_dict(t.load(args.model))
    if args.use_cuda:
        model = model.cuda()

    test_step = model.tester()
    img, pred = test_batch_loader.test(test_step, use_cuda=args.use_cuda)

    with open('./Results/current_result.csv', mode='w') as csv_file:
        csv_file.write("id,label\n")
        for i in range(len(img)):
            csv_file.write(img[i].split('/')[-1][:-4] + ',' +
                           test_batch_loader.classes[pred[i]] + '\n')
        # The with-block closes the file; no explicit close() is needed.

    return
Example 2
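Builds a fully 3D variant of the network: the 35-wide input axis is reshaped into a 5x7 grid and construction is delegated to build_partnet_all3d.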
def catenet_all3d(inputs, cfg_initial=None, train=True, **kwargs):
    m = model.ConvNet(**kwargs)

    # cfg_initial defaults to None; guard it so the membership test below
    # cannot raise a TypeError.
    cfg = cfg_initial if cfg_initial is not None else {}

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    shape_list = inputs.get_shape().as_list()
    assert shape_list[2] == 35, 'Must set expand==1'
    inputs = tf.reshape(inputs, [shape_list[0], shape_list[1], 5, 7, -1])
    m = build_partnet_all3d(m,
                            cfg,
                            "layernum_sub",
                            "subnet",
                            inputs=inputs,
                            layer_offset=0,
                            dropout=dropout)

    return m
Example 3
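Builds the network layer by layer from the config: each index becomes either a conv layer (with optional LRN, batch norm, and pooling) or a fully connected layer.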
def catenet(inputs, cfg_initial=None, train=True, **kwargs):
    m = model.ConvNet(**kwargs)

    # cfg_initial defaults to None; guard it so the membership tests below
    # cannot raise a TypeError.
    cfg = cfg_initial if cfg_initial is not None else {}

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    layernum_sub = cfg['layernum_sub']
    for indx_layer in range(layernum_sub):  # range, not Python 2's xrange
        do_conv = getWhetherConv(indx_layer, cfg)
        if do_conv:
            layer_name = "conv%i" % (1 + indx_layer)
            with tf.variable_scope(layer_name):
                if indx_layer == 0:
                    m.conv(getConvNumFilters(indx_layer, cfg),
                           getConvFilterSize(indx_layer, cfg),
                           getConvStride(indx_layer, cfg),
                           padding='VALID',
                           in_layer=inputs)
                else:
                    m.conv(getConvNumFilters(indx_layer, cfg),
                           getConvFilterSize(indx_layer, cfg),
                           getConvStride(indx_layer, cfg))

                if getWhetherLrn(indx_layer, cfg):
                    print('Lrn used!')
                    m.norm(getLrnDepth(indx_layer, cfg))

                if getWhetherBn(indx_layer, cfg):
                    m.batchnorm_corr(train)

                do_pool = getWhetherPool(indx_layer, cfg)
                if do_pool:
                    m.pool(getPoolFilterSize(indx_layer, cfg),
                           getPoolStride(indx_layer, cfg))

        else:
            layer_name = "fc%i" % (1 + indx_layer)
            with tf.variable_scope(layer_name):
                m.fc(getFcNumFilters(indx_layer, cfg),
                     init='trunc_norm',
                     dropout=dropout,
                     bias=.1)

                if getWhetherBn(indx_layer, cfg):
                    m.batchnorm_corr(train)

    if layernum_sub == 0:
        m.output = inputs
    return m
Example 4
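Constructor of a small training wrapper: instantiates the ConvNet, moves it to the target device, and sets up a cross-entropy loss and an Adam optimizer.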
    def __init__(self):

        # `device` is assumed to be defined at module level, e.g.
        # torch.device('cuda' if torch.cuda.is_available() else 'cpu').
        self.net = model.ConvNet()
        self.net.to(device)

        self.criterion = nn.CrossEntropyLoss()

        self.optimizer = optim.Adam(self.net.parameters(), lr=0.01)
        self.n_epochs = 4
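
For context, a minimal sketch of the training loop such a wrapper might expose, assuming a standard DataLoader named train_loader and the module-level device; the train method itself is hypothetical and not part of the original snippet:

    def train(self, train_loader):
        # Hypothetical companion method; uses only attributes set in __init__.
        self.net.train()
        for epoch in range(self.n_epochs):
            for inputs, targets in train_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                self.optimizer.zero_grad()
                loss = self.criterion(self.net(inputs), targets)
                loss.backward()
                self.optimizer.step()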
Example 5
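Runs a temporal subnetwork first, zero-pads its transposed output from 31 to 35 rows so it reshapes into a 5x7 grid, then feeds it to a spatial subnetwork.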
def catenet_temp_spa(inputs, cfg_initial, train=True, **kwargs):

    m = model.ConvNet(**kwargs)

    cfg = cfg_initial

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    curr_layer = 0
    m = build_partnet(m,
                      cfg,
                      "layernum_temp",
                      "tempnet",
                      inputs=inputs,
                      layer_offset=curr_layer,
                      dropout=dropout)
    curr_layer = curr_layer + cfg["layernum_temp"]

    tensor_tmp = m.output
    tensor_tmp = tf.transpose(tensor_tmp, perm=[0, 2, 1, 3])

    shape_list = tensor_tmp.get_shape().as_list()
    tensor_tmp = tf.reshape(tensor_tmp, [shape_list[0], shape_list[1], -1])

    # The 31 data rows (5 + 6 + 14 + 6) are interleaved with 4 zero rows
    # below, giving 35 rows that reshape cleanly into a 5x7 grid.
    shape_now = tensor_tmp.get_shape().as_list()
    slice0 = tf.slice(tensor_tmp, [0, 0, 0], [-1, 5, -1])
    slice1 = tf.slice(tensor_tmp, [0, 5, 0], [-1, 6, -1])
    slice2 = tf.slice(tensor_tmp, [0, 11, 0], [-1, 14, -1])
    slice3 = tf.slice(tensor_tmp, [0, 25, 0], [-1, 6, -1])

    pad_ten0 = tf.zeros([shape_now[0], 1, shape_now[2]])
    pad_ten1 = tf.zeros([shape_now[0], 1, shape_now[2]])
    pad_ten2 = tf.zeros([shape_now[0], 1, shape_now[2]])
    pad_ten3 = tf.zeros([shape_now[0], 1, shape_now[2]])

    tensor_tmp = tf.concat([
        slice0, pad_ten0, pad_ten1, slice1, pad_ten2, slice2, pad_ten3, slice3
    ], 1)

    tensor_tmp = tf.reshape(tensor_tmp, [shape_list[0], 5, 7, -1])

    m.output = tensor_tmp
    m = build_partnet(m,
                      cfg,
                      "layernum_spa",
                      "spanet",
                      layer_offset=curr_layer,
                      dropout=dropout)

    return m
Example 6
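Stacks additional fully connected layers on top of existing features, ends with a 117-way linear readout, and prints the total number of trainable parameters.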
def catenet_add(inputs, cfg_initial=None, train=True, **kwargs):

    m_add = model.ConvNet(**kwargs)
    # cfg_initial defaults to None; guard it so the membership tests below
    # cannot raise a TypeError.
    cfg = cfg_initial if cfg_initial is not None else {}

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    layernum_add = cfg.get('layernum_add', 1)

    m_add.output = inputs

    for indx_layer in range(layernum_add - 1):
        layer_name = "fc_add%i" % (1 + indx_layer)
        with tf.variable_scope(layer_name):
            m_add.fc(getFcNumFilters(indx_layer, cfg, key_want="addnet"),
                     init='trunc_norm',
                     dropout=dropout,
                     bias=.1)

            if getWhetherBn(indx_layer, cfg, key_want="addnet"):
                m.batchnorm_corr(train)

    with tf.variable_scope('fc_add'):
        m_add.fc(117,
                 init='trunc_norm',
                 activation=None,
                 dropout=None,
                 bias=0,
                 trainable=True)

    # Count and print the total number of trainable parameters.
    total_parameters = 0
    for variable in tf.trainable_variables():
        # get_shape() returns a TensorShape of tf.Dimension objects
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    print(total_parameters)

    return m_add
Example 7
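Applies a 3D spatial subnetwork to the 5x7-reshaped input, flattens the result, and finishes with a temporal subnetwork.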
def catenet_spa_temp_3d(inputs, cfg_initial, train=True, **kwargs):

    m = model.ConvNet(**kwargs)

    cfg = cfg_initial

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    shape_list = inputs.get_shape().as_list()
    curr_layer = 0

    assert shape_list[2] == 35, 'Must set expand==1'

    inputs = tf.reshape(inputs, [shape_list[0], shape_list[1], 5, 7, -1])
    m = build_partnet_3d(m,
                         cfg,
                         "layernum_spa",
                         "spanet",
                         inputs=inputs,
                         layer_offset=curr_layer,
                         dropout=dropout)
    new_input = m.output
    shape_list_tmp = new_input.get_shape().as_list()
    new_input = tf.reshape(new_input,
                           [shape_list_tmp[0], shape_list_tmp[1], 1, -1])

    m.output = new_input
    curr_layer = curr_layer + cfg["layernum_spa"]
    m = build_partnet(m,
                      cfg,
                      "layernum_temp",
                      "tempnet",
                      layer_offset=curr_layer,
                      dropout=dropout)

    return m
Example 8
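The same spatial-then-temporal pipeline as Example 7, but the spatial part uses 3D-in-2D convolutions via build_partnet_3din2d.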
def catenet_spa_temp_3din2d(inputs, cfg_initial, train=True, **kwargs):

    m = model.ConvNet(**kwargs)

    cfg = cfg_initial

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    shape_list = inputs.get_shape().as_list()
    curr_layer = 0

    assert shape_list[2] == 35, 'Must set expand==1'

    m = build_partnet_3din2d(m,
                             cfg,
                             "layernum_spa",
                             "spanet",
                             inputs=inputs,
                             layer_offset=curr_layer,
                             dropout=dropout)

    curr_layer = curr_layer + cfg["layernum_spa"]
    m = build_partnet(m,
                      cfg,
                      "layernum_temp",
                      "tempnet",
                      layer_offset=curr_layer,
                      dropout=dropout)

    return m
Example 9
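Script fragment: finishes building the data loaders, parses the layer spec and noise schedule from the command line, instantiates the ConvNet, and defines the validation pass (Adam plus an MSE reconstruction loss).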
    **kwargs)
validation_loader = torch.utils.data.DataLoader(
    validation_labeled,
    batch_size=args.test_batch_size,
    shuffle=True,
    **kwargs)
print("Data loaded!")

# layer arguments: layer-type, filter-count, kernel-size, batch-normalization-boolean, activation
# layers = [["convv", 32, 5, False, ""], ["maxpool", 0, 2, True, "lrelu"], ["convv", 64, 3, True, "lrelu"], ["convv", 64, 3, False, ""],
#          ["maxpool", 0, 2, True, "lrelu"], ["convv", 128, 3, True, "lrelu"]]

layers = parse_layers_from_file(args.param_file)

noise = [float(x) for x in args.noise.split(",")]
model = model.ConvNet(layers, noise)

#if args.cuda:
#    model.cuda()

optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0001)
mse = nn.MSELoss(reduction='mean')  # MSE loss for reconstruction error; size_average is deprecated


def validation(epoch):
    model.eval()
    validation_loss = 0
    correct = 0
    for batch_idx, (data, target) in enumerate(validation_loader):
        #if args.cuda:
        #    data, target = data.cuda(), target.cuda()
Example 10
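Complete training loop: SGD with per-iteration loss/accuracy tracking, per-epoch validation, learning-rate halving when validation loss stops improving, checkpointing, and matplotlib loss/accuracy plots.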
def main(args, params, valid_acc_thresh=0):
    train_batch_loader = batcher.Batcher("./Data/train/")
    valid_batch_loader = batcher.Batcher("./Data/valid/")

    model = mdl.ConvNet(params)
    if args.use_cuda:
        model = model.cuda()

    learning_rate = params["learning_rate"]
    optimizer = SGD(model.parameters(), learning_rate)

    train_step = model.trainer(optimizer)
    valid_step = model.validator()

    tracking_valid_loss = []
    tracking_valid_acc = []
    tracking_train_loss = []
    tracking_train_loss_epoch = []
    tracking_train_acc = []
    tracking_train_acc_epoch = []
    current_epoch = 0

    while current_epoch < args.epochs:
        iteration = 0
        while current_epoch == train_batch_loader.epoch:
            batch = train_batch_loader.next_batch(batch_size=args.batch_size)

            loss, acc = train_step(batch, use_cuda=args.use_cuda)
            tracking_train_loss.append(loss)
            tracking_train_acc.append(acc)
            # Print progress every 50 iterations.
            if iteration % 50 == 0:
                print("Epoch: " + str(current_epoch + 1) + ", It: " +
                      str(iteration + 1) + ", Loss: " + str(loss))
            iteration += 1
        current_epoch += 1
        loss_valid, acc_valid = valid_batch_loader.eval(valid_step,
                                                        use_cuda=args.use_cuda)
        tracking_valid_loss.append(loss_valid)
        tracking_valid_acc.append(acc_valid)
        tracking_train_loss_epoch.append(
            sum(tracking_train_loss) / float(len(tracking_train_loss)))
        tracking_train_loss = []
        tracking_train_acc_epoch.append(
            sum(tracking_train_acc) / float(len(tracking_train_acc)))
        tracking_train_acc = []
        print('\n')
        print("***VALIDATION***")
        print("Epoch: " + str(current_epoch) + ", Loss: " + str(loss_valid) +
              ", Acc: " + str(acc_valid))
        print("****************")
        print('\n')
        if tracking_valid_acc[-1] < valid_acc_thresh:
            break
        if current_epoch >= 3:
            # Halve the learning rate once validation loss stops improving.
            if tracking_valid_loss[-2] <= tracking_valid_loss[-1]:
                learning_rate = learning_rate / 2
                optimizer = SGD(model.parameters(), learning_rate)
                train_step = model.trainer(optimizer)
                print("learning rate adapted to " + str(learning_rate))
    t.save(
        model.state_dict(), "./models/" + args.model_name + "_acc" +
        str(tracking_valid_acc[-1]) + "_e" + str(current_epoch) + ".model")
    plt.plot(range(len(tracking_train_loss_epoch)),
             tracking_train_loss_epoch,
             label="train")
    plt.plot(range(len(tracking_train_loss_epoch)),
             tracking_valid_loss,
             label="valid")
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend()  # without this the train/valid labels are never displayed
    plt.show()
    plt.plot(range(len(tracking_train_loss_epoch)),
             tracking_train_acc_epoch,
             label="train")
    plt.plot(range(len(tracking_train_loss_epoch)),
             tracking_valid_acc,
             label="valid")
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend()
    plt.show()
    with open("./rescc" + ("_m" if args.modified_loss else "") + ".txt",
              'w') as f:
        f.write(str(tracking_train_loss_epoch))
        f.write('\n')
        f.write(str(tracking_train_acc_epoch))
        f.write('\n')
        f.write(str(tracking_valid_loss))
        f.write('\n')
        f.write(str(tracking_valid_acc))
        f.close()

    return tracking_valid_loss[-1], tracking_valid_acc[-1]
Example 11
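Script fragment: the tail of a training helper, followed by setup that trains a ConvNet (28x28 inputs, 10 classes) for 10 epochs with NLL loss and SGD.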
		label = torch.tensor(label, dtype=torch.long)
		output = model(batch_data).squeeze(1)
		loss = criterion(output, label)
		loss.backward()
		optim.step()
		# Accumulate the scalar value rather than the tensor, so the
		# autograd graph is not kept alive across iterations.
		all_loss += loss.item()
		if it % 10 == 0:
			loss_buf.append(all_loss / 600)
			all_loss = 0
		if it % 100 == 0:
			print('have finished {}%'.format((it // 100 + 1) * 10))
	return loss_buf
input_size = 28*28
output_size = 10
#net = model.NeuralNet(input_size,output_size)
net = model.ConvNet(output_size)


loss_buf = []
epochs = 10
lr = 0.01
criterion = nn.NLLLoss()
optimizer = optim.SGD(net.parameters(), lr=lr)
for epoch in range(epochs):
	print("in epoch {}:".format(epoch))
	loss_buf += train(train_set_image, train_set_label, net, criterion, optimizer)

def isPredictRight(out,y):
	if out.argmax(dim=2) == y:
		return 1
	else:
Example 12
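Builds the model as a tnn graph loaded from a JSON config, optionally densely connects the conv1..conv6 nodes, configures per-node memory decay, dropout, and fixed weights, then unrolls the graph over temporal slices of the input.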
def catenet_tnn(inputs,
                cfg_path,
                train=True,
                tnndecay=0.1,
                decaytrain=0,
                cfg_initial=None,
                cmu=0,
                fixweights=False,
                seed=0,
                **kwargs):
    m = model.ConvNet(fixweights=fixweights, seed=seed, **kwargs)

    params = {'input': inputs.name, 'type': 'fc'}

    # cfg_initial defaults to None; normalize it so the membership tests
    # below ('sep_num', 'all_conn', 'retres') cannot raise a TypeError.
    if cfg_initial is None:
        cfg_initial = {}

    dropout = 0.5 if train else None

    # Get inputs
    shape_list = inputs.get_shape().as_list()
    assert shape_list[2] == 35, 'Must set expand==1'
    sep_num = shape_list[1]
    if 'sep_num' in cfg_initial:
        sep_num = cfg_initial['sep_num']
    small_inputs = tf.split(inputs, sep_num, 1)
    for indx_time in range(len(small_inputs)):
        small_inputs[indx_time] = tf.transpose(small_inputs[indx_time],
                                               [0, 2, 1, 3])
        small_inputs[indx_time] = tf.reshape(small_inputs[indx_time],
                                             [shape_list[0], 5, 7, -1])

    G = main.graph_from_json(cfg_path)

    if 'all_conn' in cfg_initial:
        node_list = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'conv6']

        MASTER_EDGES = []
        for i in range(len(node_list)):
            for j in range(len(node_list)):
                # (i, j) with j == i + 1 is already an edge in the base graph
                if (j > i + 1 or i > j) and (j > 0):
                    MASTER_EDGES.append((node_list[i], node_list[j]))

        print(MASTER_EDGES)
        G.add_edges_from(MASTER_EDGES)

    for node, attr in G.nodes(data=True):

        memory_func, memory_param = attr['kwargs']['memory']
        if 'nunits' in memory_param:
            attr['cell'] = tnn_LSTMCell
        else:
            memory_param['memory_decay'] = tnndecay
            memory_param['trainable'] = decaytrain == 1
            attr['kwargs']['memory'] = (memory_func, memory_param)

        if fixweights:
            if node.startswith('conv'):
                _, prememory_param = attr['kwargs']['pre_memory'][0]
                attr['kwargs']['pre_memory'][0] = (model.conv_fix,
                                                   prememory_param)

            if node.startswith('fc'):
                _, prememory_param = attr['kwargs']['pre_memory'][0]
                attr['kwargs']['pre_memory'][0] = (model.fc_fix,
                                                   prememory_param)

        if seed != 0:
            for sub_prememory in attr['kwargs']['pre_memory']:
                prememory_func, prememory_param = sub_prememory
                if 'kernel_init_kwargs' in prememory_param:
                    prememory_param['kernel_init_kwargs']['seed'] = seed

        if node in ['fc7', 'fc8']:
            attr['kwargs']['pre_memory'][0][1]['dropout'] = dropout

    main.init_nodes(G, batch_size=shape_list[0])
    main.unroll(G, input_seq={'conv1': small_inputs}, ntimes=len(small_inputs))

    if 'retres' not in cfg_initial:
        if cmu == 0:
            m.output = G.node['fc8']['outputs'][-1]
        else:
            m.output = tf.transpose(tf.stack(G.node['fc8']['outputs']),
                                    [1, 2, 0])
    else:
        m.output = tf.concat(
            [G.node['fc8']['outputs'][x] for x in cfg_initial['retres']], 1)

    print(len(G.node['fc8']['outputs']))
    m.params = params

    return m
Example 13
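Variant of Example 5: the temporal output is split into sep_num chunks, a shared (variable-reused) spatial subnetwork processes each chunk, and the concatenated results pass through a final spatio-temporal subnetwork.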
def catenet_temp_spa_sep(inputs, cfg_initial, train=True, **kwargs):

    m = model.ConvNet(**kwargs)

    cfg = cfg_initial

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    curr_layer = 0
    m = build_partnet(m,
                      cfg,
                      "layernum_temp",
                      "tempnet",
                      inputs=inputs,
                      layer_offset=curr_layer,
                      dropout=dropout)
    curr_layer = curr_layer + cfg["layernum_temp"]

    tensor_tmp = m.output
    tensor_tmp = tf.transpose(tensor_tmp, perm=[0, 2, 1, 3])

    shape_list = tensor_tmp.get_shape().as_list()
    sep_num = cfg.get("sep_num", 9)

    small_inputs = tf.split(tensor_tmp, sep_num, 2)
    small_outputs = []

    first_flag = True

    for small_input in small_inputs:
        tensor_tmp = tf.reshape(small_input,
                                [shape_list[0], shape_list[1], -1])

        # Same 31-row to 35-row zero padding as in Example 5, per chunk.
        shape_now = tensor_tmp.get_shape().as_list()
        slice0 = tf.slice(tensor_tmp, [0, 0, 0], [-1, 5, -1])
        slice1 = tf.slice(tensor_tmp, [0, 5, 0], [-1, 6, -1])
        slice2 = tf.slice(tensor_tmp, [0, 11, 0], [-1, 14, -1])
        slice3 = tf.slice(tensor_tmp, [0, 25, 0], [-1, 6, -1])

        pad_ten0 = tf.zeros([shape_now[0], 1, shape_now[2]])
        pad_ten1 = tf.zeros([shape_now[0], 1, shape_now[2]])
        pad_ten2 = tf.zeros([shape_now[0], 1, shape_now[2]])
        pad_ten3 = tf.zeros([shape_now[0], 1, shape_now[2]])

        tensor_tmp = tf.concat([
            slice0, pad_ten0, pad_ten1, slice1, pad_ten2, slice2, pad_ten3,
            slice3
        ], 1)

        tensor_tmp = tf.reshape(tensor_tmp, [shape_list[0], 5, 7, -1])

        m.output = tensor_tmp
        if first_flag:
            with tf.variable_scope("small"):
                m = build_partnet(m,
                                  cfg,
                                  "layernum_spa",
                                  "spanet",
                                  layer_offset=curr_layer,
                                  dropout=dropout)
            first_flag = False
        else:
            with tf.variable_scope("small", reuse=True):
                m = build_partnet(m,
                                  cfg,
                                  "layernum_spa",
                                  "spanet",
                                  layer_offset=curr_layer,
                                  dropout=dropout)

        small_outputs.append(m.output)

    new_inputs = tf.concat(small_outputs, 1)
    m.output = new_inputs
    curr_layer = curr_layer + cfg["layernum_spa"]

    m = build_partnet(m,
                      cfg,
                      "layernum_spatemp",
                      "spatempnet",
                      layer_offset=curr_layer,
                      dropout=dropout)

    return m
Example 14
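Splits the input along its temporal axis, applies a shared spatial subnetwork to each 5x7-reshaped slice, concatenates the per-slice outputs, and finishes with a temporal subnetwork.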
def catenet_spa_temp(inputs, cfg_initial, train=True, **kwargs):

    m = model.ConvNet(**kwargs)

    cfg = cfg_initial

    dropout = cfg.get('dropout', 0.5) if train else None
    if dropout == 0:
        dropout = None

    inputs = tf.transpose(inputs, [0, 2, 1, 3])
    shape_list = inputs.get_shape().as_list()
    split_num = cfg.get('split_num', shape_list[2])
    small_inputs = tf.split(inputs, split_num, 2)
    small_outputs = []
    curr_layer = 0

    assert shape_list[1] == 35, 'Must set expand==1'

    first_flag = True

    for small_input in small_inputs:
        small_input = tf.reshape(small_input, [shape_list[0], 5, 7, -1])
        if first_flag:
            with tf.variable_scope("small"):
                m = build_partnet(m,
                                  cfg,
                                  "layernum_spa",
                                  "spanet",
                                  inputs=small_input,
                                  layer_offset=curr_layer,
                                  dropout=dropout)
            first_flag = False
        else:
            with tf.variable_scope("small", reuse=True):
                m = build_partnet(m,
                                  cfg,
                                  "layernum_spa",
                                  "spanet",
                                  inputs=small_input,
                                  layer_offset=curr_layer,
                                  dropout=dropout)
        small_output = m.output
        shape_list_tmp = small_output.get_shape().as_list()
        small_output = tf.reshape(small_output, [shape_list_tmp[0], 1, 1, -1])
        small_outputs.append(small_output)

    new_input = tf.concat(small_outputs, 1)
    m.output = new_input
    curr_layer = curr_layer + cfg["layernum_spa"]
    m = build_partnet(m,
                      cfg,
                      "layernum_temp",
                      "tempnet",
                      layer_offset=curr_layer,
                      dropout=dropout)

    return m