Example #1
import numpy as np  # assumed available; run_epoch comes from the surrounding project


def run_test(session, model, model_input):
    # Run test epoch
    perp, preds = run_epoch(session, model)

    # Fetch useful information about the test
    targs = model_input.targets.reshape([1,-1])
    meta = model_input.meta
    num_steps = model_input.num_steps
    batch_size = model_input.batch_size
    num_condits = len(meta)
    num_preds = num_steps * batch_size
    max_targs = max([x[1] for x in meta])

    # Create a zero-filled results container (one row per condition)
    r = np.zeros([num_condits, max_targs])

    # Extract the prediction assigned to each target word
    targ_preds = np.squeeze(preds[np.arange(num_preds), targs])
    j = 0
    for i, (c, ppc) in enumerate(meta):
        # Index of the critical word within each of the ppc items of this condition
        crit_inds = list(range(c - 1, num_steps * ppc - 1, num_steps))
        condit = targ_preds[j:j + num_steps * ppc]
        r[i, 0:ppc] = condit[crit_inds]
        j += num_steps * ppc
    return r
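
A minimal usage sketch (not part of the original example); session, model, and model_input are assumed to come from the surrounding project setup:

# Hypothetical usage of run_test; names other than run_test are assumptions.
results = run_test(session, model, model_input)

# Each row holds one condition; only the first ppc entries are filled,
# so average just those when summarizing.
for i, (_c, ppc) in enumerate(model_input.meta):
    print('condition %d: mean critical-word prediction %.4f'
          % (i, results[i, :ppc].mean()))
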
Example #2
import os
import random

import tensorflow as tf  # TF1-style API; Dataset, SparseResNet and run_epoch are project-local


def main(config):
    if config.outf is None:
        config.outf = 'result'
    os.makedirs(config.outf, exist_ok=True)

    DataLoader = Dataset(config.dataset, config.datapath, config.level,
                         config.num_classes, config.num_classes2,
                         config.num_classes3)
    Input_train, Input_test = DataLoader(config.validation)

    ### writing results ###
    filename = config.savename  #+'_ver:'+str(config.version)+'_pad:'+str(config.padding)+'_norm:'+str(config.norm)
    savepath = os.path.join(config.outf, filename)
    pfile = open(savepath, 'w+')
    pfile.write('dataset: ' + str(config.dataset) + '\n')
    pfile.write('level: ' + str(config.level) + '\n')
    pfile.write('batch size: ' + str(config.batch_size) + '\n')
    pfile.write('initial learning rate: ' + str(config.learning_rate) + '\n')
    pfile.write('validation split: ' + str(config.validation) + '\n')
    pfile.write('regularization rate: ' + str(config.beta) + '\n')
    pfile.write('n: ' + str(config.num_layers) + '\n')
    pfile.close()

    with tf.Graph().as_default():
        Model = SparseResNet
        trainModel = Model(config, is_training=True)
        testModel = Model(config, is_training=False)

        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))) as sess:
            init = tf.global_variables_initializer()
            #print(init.node_def)
            sess.run(init)
            print("initialize all variables")

            max_accur = 0  # best test accuracy so far
            pre_val = 0
            count = 0
            num_change = 0
            count_epoch = 0
            for i in range(config.num_epoch):
                ### data shuffle ###
                train_data, train_labels = Input_train[0], Input_train[1]

                tmp = list(zip(train_data, train_labels))
                random.shuffle(tmp)
                train_data, train_labels = zip(*tmp)

                Input_train = [train_data, train_labels]

                print("\nEpoch: %d/%d" % (i + 1, config.num_epoch))

                train_accur = run_epoch(sess, trainModel, Input_train,
                                        config.level, True)

                test_accur = run_epoch(sess, testModel, Input_test,
                                       config.level)

                if test_accur > max_accur:
                    max_accur = test_accur

                print("lv%d - train accur: %.3f" % (config.level, train_accur))
                pfile = open(savepath, 'a+')
                pfile.write("\nEpoch: %d/%d\n" % (i + 1, config.num_epoch))
                pfile.write("lv%d - train: %.3f\n" %
                            (config.level, train_accur))
                pfile.close()

                ### epoch-based step decay (the validation-based version below is disabled) ###
                if i >= 200:
                    trainModel.lr = config.learning_rate / 125
                elif i >= 150:
                    trainModel.lr = config.learning_rate / 25
                elif i >= 100:
                    trainModel.lr = config.learning_rate / 5
                '''
                count_epoch += 1
                if (test_accur < pre_val):
                    count += 1
                if count >= 3 and num_change < 4 and count_epoch > 30:
                    trainModel.lr /= 10
                    print('learning rate %g:' %(trainModel.lr))
                    pfile = open(savepath, 'a+')
                    pfile.write('\nChange Learning Rate')
                    pfile.write('\nlearning rate: %g\n' %trainModel.lr)
                    pfile.close()
                    num_change += 1
                    count = 0
                    count_epoch = 0
                pre_val = test_accur
                '''

                print("lv%d - test accur: %.3f / max: %.3f" %
                      (config.level, test_accur, max))
                pfile = open(savepath, 'a+')
                pfile.write("lv%d - test accur: %.3f / max: %.3f\n" %
                            (config.level, test_accur, max))
                pfile.close()
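
The epoch-based decay above is a plain step schedule; pulled out as a helper, the boundaries and divisors (100/150/200 and 5/25/125, as in the code above) read more clearly. A sketch, not project code:

# Sketch: the same step-decay schedule as a standalone helper.
def step_decay(epoch, base_lr):
    if epoch >= 200:
        return base_lr / 125
    if epoch >= 150:
        return base_lr / 25
    if epoch >= 100:
        return base_lr / 5
    return base_lr

# inside the epoch loop:
#     trainModel.lr = step_decay(i, config.learning_rate)
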
Example #3
import os
import random

import tensorflow as tf  # TF1-style API; Dataset, ResNet and run_epoch are project-local


def main(config):
    if config.outf is None:
        config.outf = 'sample'
    os.makedirs(config.outf, exist_ok=True)

    CIFAR10_PATH = '../data/cifar-10'
    CIFAR100_PATH = '../data/cifar-100'
    if config.dataset == 'cifar10':
        DataLoader = Dataset(config.dataset, CIFAR10_PATH, config.num_classes)
    elif config.dataset == 'cifar100':
        DataLoader = Dataset(config.dataset, CIFAR100_PATH, config.num_classes)
    else:
        raise ValueError('unknown dataset: %s' % config.dataset)

    Input_train, Input_test = DataLoader(config.validation)

    ### writing results ###
    filename = config.savename  #+'_pad:'+str(config.padding)+'_norm:'+str(config.norm)
    savepath = os.path.join(config.outf, filename)
    pfile = open(savepath, 'w+')
    pfile.write('dataset: ' + str(config.dataset) + '\n')
    pfile.write('num epoch: ' + str(config.num_epoch) + '\n')
    pfile.write('batch size: ' + str(config.batch_size) + '\n')
    pfile.write('initial learning rate: ' + str(config.learning_rate) + '\n')
    pfile.write('validation split: ' + str(config.validation) + '\n')
    pfile.write('regularization rate: ' + str(config.beta) + '\n')
    pfile.write('n: ' + str(config.num_layers) + '\n')
    pfile.close()

    with tf.Graph().as_default():
        trainModel = ResNet(config, is_training=True)
        config.batch_size = 100  # evaluate with a fixed batch size
        testModel = ResNet(config, is_training=False)

        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))) as sess:
            init = tf.global_variables_initializer()
            #print(init.node_def)
            sess.run(init)
            print("initialize all variables")

            pre_val = 0
            count = 0
            count_epoch = 0
            num_change = 0
            max_accur = 0
            for i in range(config.num_epoch):
                ### data shuffle ###
                train_data, train_labels = Input_train[0], Input_train[1]
                tmp = list(zip(train_data, train_labels))
                random.shuffle(tmp)
                train_data, train_labels = zip(*tmp)
                Input_train = [train_data, train_labels]

                train_accur = run_epoch(sess,
                                        trainModel,
                                        Input_train,
                                        printOn=True)
                #val_accur = run_epoch(sess, testModel, Input_val)
                print("Epoch: %d/%d" % (i + 1, config.num_epoch))
                print("train accur: %.3f" % train_accur)
                #print("val accur: %.3f" %val_accur)
                pfile = open(savepath, 'a+')
                pfile.write("\nEpoch: %d/%d\n" % (i + 1, config.num_epoch))
                pfile.write("train accur: %.3f\n" % train_accur)
                #pfile.write("val accur: %.3f\n" %val_accur)
                pfile.close()

                test_accur = run_epoch(sess, testModel, Input_test)

                if test_accur > max_accur:
                    max_accur = test_accur

                print("test accur: %.3f\t max accur: %.3f" %
                      (test_accur, max_accur))
                pfile = open(savepath, 'a+')
                pfile.write("test accur: %.3f\t max accur: %.3f\n" %
                            (test_accur, max_accur))
                pfile.close()

                ### epoch-based step decay (the validation-based version below is disabled) ###
                if i >= 150:
                    trainModel.lr = config.learning_rate / 100
                elif i >= 100:
                    trainModel.lr = config.learning_rate / 10
                '''
                count_epoch += 1
                if (test_accur < pre_val):
                    count += 1
                if count == 4 and num_change < 4 and count_epoch > 20: # 10
                    trainModel.lr /= 10
                    print('change learning rate: %g' %(trainModel.lr))
                    pfile = open(savepath, 'a+')
                    pfile.write("\nchange learning rate: %g\n" %trainModel.lr)
                    pfile.close()
                    num_change += 1
                    count = 0
                    count_epoch = 0
                pre_val = test_accur 
                '''
            print('best accuracy:', max_accur)
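
The zip/shuffle/zip pattern in these examples keeps images and labels aligned while shuffling. An equivalent numpy sketch, assuming the inputs can be stacked into arrays:

import numpy as np


# Shuffle data and labels in unison with a single permutation.
def shuffle_in_unison(data, labels):
    data = np.asarray(data)
    labels = np.asarray(labels)
    perm = np.random.permutation(len(data))
    return data[perm], labels[perm]
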
Example #4
import os
import random

import tensorflow as tf  # TF1-style API; Dataset, SparseResNet and run_epoch are project-local


def main(config):
    if config.outf is None:
        config.outf = 'result'
    os.makedirs(config.outf, exist_ok=True)

    DataLoader = Dataset(config.dataset, config.datapath)
    dataset_train, dataset_test, All_test = DataLoader(config.validation)

    ### writing results ###
    filename = config.savename #+'_ver:'+str(config.version)+'_pad:'+str(config.padding)+'_norm:'+str(config.norm)
    savepath = os.path.join(config.outf, filename)
    pfile = open(savepath, 'w+')
    pfile.write('dataset: '+str(config.dataset)+'\n')
    pfile.write('batch size: '+str(config.batch_size)+'\n')
    pfile.write('initial learning rate: '+str(config.learning_rate)+'\n')
    pfile.write('validation split: '+str(config.validation)+'\n')
    pfile.write('regularization rate: '+str(config.beta)+'\n')
    pfile.write('n: '+str(config.num_layers)+'\n')
    pfile.close()

    with tf.Graph().as_default():
        Model = SparseResNet
        trainModel = Model(config, is_training=True)
        testModel = Model(config, is_training=False)

        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))) as sess:
            init = tf.global_variables_initializer()
            #print(init.node_def)
            sess.run(init)
            print("initialize all variables")

            max_accur = [0, 0, 0]
            All_max = 0
            for lv in range(1,4):
                lv_max = 0  # best test accuracy for this level
                pre_val = 0
                count = 0 
                num_change = 0
                count_epoch = 0
                for i in range(config.num_epoch):
                    ### data shuffle ###
                    train_data, train_labels = dataset_train[lv-1][0], dataset_train[lv-1][1]

                    tmp = list(zip(train_data, train_labels))
                    random.shuffle(tmp)
                    train_data, train_labels = zip(*tmp)

                    Input_train = [train_data, train_labels]
                    Input_test = dataset_test[lv-1]

                    print("\nEpoch: %d/%d" %(i+1, config.num_epoch))

                    train_accur = run_epoch(sess, trainModel, Input_train, lv, True)
                    test_accur = run_epoch(sess, testModel, Input_test, lv)
                    all_accur = run_epoch(sess, testModel, All_test, 3)

                    if test_accur > lv_max:
                        lv_max = test_accur
                    if all_accur > All_max:
                        All_max = all_accur

                    print("lv%d - train accur: %.3f" %(lv, train_accur))
                    pfile = open(savepath, 'a+')
                    pfile.write("\nEpoch: %d/%d\n" %(i+1, config.num_epoch))
                    pfile.write("lv%d - train: %.3f\n" %(lv, train_accur))
                    pfile.close()

                    ### epoch-based step decay ###
                    if i >= 150:
                        trainModel.lr = config.learning_rate / 100
                    elif i >= 100:
                        trainModel.lr = config.learning_rate / 10
                    else:
                        trainModel.lr = config.learning_rate

                    print("lv%d - test accur: %.3f / max: %.3f" %(lv, test_accur, max))
                    print("All test accur: %.3f / max: %.3f" %(all_accur, All_max))
                    pfile = open(savepath, 'a+')
                    pfile.write("lv%d - test accur: %.3f / max: %.3f\n" %(lv, test_accur, max))
                    pfile.write("All test accur: %.3f / max: %.3f\n" %(all_accur, All_max))
                    pfile.close()
                max_accur[lv-1] = lv_max
                print('lv%d - max accur: %.3f' % (lv, lv_max))
            print('MAX lv1: %.3f lv2: %.3f lv3: %.3f'%(max_accur[0], max_accur[1], max_accur[2]))
            print("All test accur: %.3f" %(All_max))
Example #5
    # Fragment of a larger training script: assumes torch and logging are imported
    # and that word_embedding, image_encoder, image_decoder, train_loader,
    # val_loader, loss_fn, and optim are defined earlier in the function.
    device = torch.device(device)
    word_embedding = word_embedding.to(device)
    image_encoder = image_encoder.to(device)
    image_decoder = image_decoder.to(device)

    """ 11) text file logging """
    log_filename = f"logs/training_log-BIGDATASET-BIGMODEL.log"
    logging.basicConfig(filename=log_filename, level=logging.DEBUG)

    EPOCHS = 100
    START_EPOCH = 0

    print("Beginning Training")
    for epoch in range(START_EPOCH, EPOCHS):
        # TRAIN
        results = run_epoch(epoch, train_loader, image_encoder, image_decoder, word_embedding, loss_fn, optim, device,
                            train=True)
        print(results.to_string(-1))
        logging.debug(results.to_string(-1))

        # VAL
        results = run_epoch(epoch, val_loader, image_encoder, image_decoder, word_embedding, loss_fn, optim, device,
                            train=False)
        print('Val ' + results.to_string(-1))
        logging.debug('Val ' + results.to_string(-1))

        # SAVE
        torch.save(word_embedding.state_dict(), f"checkpoints/BIGMODEL-BIGDATASET-weights-embedding-epoch-{epoch}.pt")
        torch.save(image_encoder.state_dict(), f"checkpoints/BIGMODEL-BIGDATASET-weights-encoder-epoch-{epoch}.pt")
        torch.save(image_decoder.state_dict(), f"checkpoints/BIGMODEL-BIGDATASET-weights-decoder-epoch-{epoch}.pt")
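
Saving three separate weight files per epoch works; a common PyTorch alternative, sketched here with the same objects, bundles them with the epoch and optimizer state so training can resume from a single file:

# Sketch: one resumable checkpoint per epoch instead of three weight files
# (assumes the checkpoints/ directory already exists, as above).
checkpoint = {
    'epoch': epoch,
    'word_embedding': word_embedding.state_dict(),
    'image_encoder': image_encoder.state_dict(),
    'image_decoder': image_decoder.state_dict(),
    'optim': optim.state_dict(),
}
torch.save(checkpoint, f"checkpoints/BIGMODEL-BIGDATASET-epoch-{epoch}.pt")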

Example #6
import os
import random

import tensorflow as tf  # TF1-style API; Dataset, ResNet and run_epoch are project-local


def main(config):
    if config.outf is None:
        config.outf = 'sample'
    os.makedirs(config.outf, exist_ok=True)

    DataLoader = Dataset(config.dataset, config.datapath, config.num_classes)
    Input_train, Input_val, Input_test = DataLoader(config.validation)

    ### writing results ###
    filename = config.savename + '_pad:' + str(
        config.padding) + '_norm:' + str(config.norm)
    savepath = os.path.join(config.outf, filename)
    pfile = open(savepath, 'w+')
    pfile.write('dataset: ' + str(config.dataset) + '\n')
    pfile.write('padding: ' + str(config.padding) + '\n')
    pfile.write('pixel norm: ' + str(config.norm) + '\n\n')
    pfile.write('num epoch: ' + str(config.num_epoch) + '\n')
    pfile.write('batch size: ' + str(config.batch_size) + '\n')
    pfile.write('initial learning rate: ' + str(config.learning_rate) + '\n')
    pfile.write('validation split: ' + str(config.validation) + '\n')
    pfile.write('regularization rate: ' + str(config.beta) + '\n')
    pfile.write('n: ' + str(config.num_layers) + '\n')
    pfile.close()

    with tf.Graph().as_default():
        trainModel = ResNet(config, is_training=True)
        testModel = ResNet(config, is_training=False)

        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))) as sess:
            init = tf.global_variables_initializer()
            #print(init.node_def)
            sess.run(init)
            print("initialize all variables")

            pre_val = 0
            count = 0
            count_epoch = 0
            num_change = 0
            for i in range(config.num_epoch):
                train_accur = run_epoch(sess,
                                        trainModel,
                                        Input_train,
                                        printOn=True)
                val_accur = run_epoch(sess, testModel, Input_val)
                print("Epoch: %d/%d" % (i + 1, config.num_epoch))
                print("train accur: %.3f" % train_accur)
                print("val accur: %.3f" % val_accur)
                pfile = open(savepath, 'a+')
                pfile.write("\nEpoch: %d/%d\n" % (i + 1, config.num_epoch))
                pfile.write("train accur: %.3f\n" % train_accur)
                pfile.write("val accur: %.3f\n" % val_accur)
                pfile.close()

                ### if validation accuracy decreased, decrease learning rate ###
                count_epoch += 1
                if (val_accur < pre_val):
                    count += 1
                '''
                else:
                    count = 0
                '''
                if count == 4 and num_change < 4 and count_epoch > 10:
                    trainModel.lr /= 10
                    print('change learning rate: %g' % (trainModel.lr))
                    pfile = open(savepath, 'a+')
                    pfile.write("\nchange learning rate: %g\n" % trainModel.lr)
                    pfile.close()
                    num_change += 1
                    count = 0
                    count_epoch = 0
                pre_val = val_accur

                test_accur = run_epoch(sess, testModel, Input_test)
                print("test accur: %.3f" % test_accur)
                pfile = open(savepath, 'a+')
                pfile.write("\ntest accur: %.3f\n" % test_accur)
                pfile.close()
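
Example #6 drops the learning rate after four epochs without validation improvement; the same bookkeeping can be wrapped in a small class. A sketch mirroring the counters above, not a library API:

class PlateauDecay:
    """Drop lr by `factor` after `patience` bad epochs, at most `max_changes` times."""

    def __init__(self, patience=4, max_changes=4, min_epochs=10, factor=10.0):
        self.patience = patience
        self.max_changes = max_changes
        self.min_epochs = min_epochs
        self.factor = factor
        self.pre_val = 0.0
        self.count = 0
        self.count_epoch = 0
        self.num_change = 0

    def step(self, lr, val_accur):
        # Count epochs since the last change and epochs where accuracy fell.
        self.count_epoch += 1
        if val_accur < self.pre_val:
            self.count += 1
        if (self.count == self.patience and self.num_change < self.max_changes
                and self.count_epoch > self.min_epochs):
            lr /= self.factor
            self.num_change += 1
            self.count = 0
            self.count_epoch = 0
        self.pre_val = val_accur
        return lr
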
Example #7
import os
import random

import tensorflow as tf  # TF1-style API; Dataset, SparseResNet and run_epoch are project-local


def main(config):
    if config.outf is None:
        config.outf = 'result'
    os.makedirs(config.outf, exist_ok=True)

    DataLoader = Dataset(config.dataset, config.datapath, config.num_classes)
    DataLoader2 = Dataset(config.dataset, config.datapath, config.num_classes2)
    DataLoader3 = Dataset(config.dataset, config.datapath, config.num_classes3)
    Input_train, Input_test = DataLoader(config.validation)
    if config.train_mode != 1:
        Input_train2, Input_test2 = DataLoader2(config.validation)
        Input_train3, Input_test3 = DataLoader3(config.validation)
    else:
        Input_train2 = Input_train3 = Input_train
        Input_test2 = Input_test3 = Input_test

    ### writing results ###
    filename = config.savename  #+'_ver:'+str(config.version)+'_pad:'+str(config.padding)+'_norm:'+str(config.norm)
    savepath = os.path.join(config.outf, filename)
    pfile = open(savepath, 'w+')
    pfile.write('dataset: ' + str(config.dataset) + '\n')
    pfile.write('num epoch: ' + str(config.num_epoch) + '\n')
    pfile.write('batch size: ' + str(config.batch_size) + '\n')
    pfile.write('initial learning rate: ' + str(config.learning_rate) + '\n')
    pfile.write('validation split: ' + str(config.validation) + '\n')
    pfile.write('regularization rate: ' + str(config.beta) + '\n')
    pfile.write('n: ' + str(config.num_layers) + '\n')
    pfile.close()

    with tf.Graph().as_default():
        Model = SparseResNet
        trainModel = Model(config, is_training=True)
        testModel = Model(config, is_training=False)

        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                allow_growth=True))) as sess:
            init = tf.global_variables_initializer()
            #print(init.node_def)
            sess.run(init)
            print("initialize all variables")

            max1 = 0
            max2 = 0
            max3 = 0
            pre_val = 0
            pre_val2 = 0
            pre_val3 = 0
            count = 0
            num_change = 0
            count_epoch = 0
            for i in range(config.num_epoch):
                ### data shuffle ###
                train_data, train_labels = Input_train[0], Input_train[1]
                train_data2, train_labels2 = Input_train2[0], Input_train2[1]
                train_data3, train_labels3 = Input_train3[0], Input_train3[1]

                tmp = list(zip(train_data, train_labels))
                tmp2 = list(zip(train_data2, train_labels2))
                tmp3 = list(zip(train_data3, train_labels3))
                random.shuffle(tmp)
                random.shuffle(tmp2)
                random.shuffle(tmp3)
                train_data, train_labels = zip(*tmp)
                train_data2, train_labels2 = zip(*tmp2)
                train_data3, train_labels3 = zip(*tmp3)

                Input_train = [train_data, train_labels]
                Input_train2 = [train_data2, train_labels2]
                Input_train3 = [train_data3, train_labels3]

                train_accur1, train_accur2, train_accur3 = run_epoch(
                    sess, trainModel, Input_train, Input_train2, Input_train3,
                    config.train_mode, True)

                test_accur1, test_accur2, test_accur3 = run_epoch(
                    sess, testModel, Input_test, Input_test2, Input_test3,
                    config.train_mode)

                print("\nEpoch: %d/%d" % (i + 1, config.num_epoch))
                print("lv1 - train accur: %.3f" % train_accur1)
                print("lv2 - train accur: %.3f" % train_accur2)
                print("lv3 - train accur: %.3f" % train_accur3)
                pfile = open(savepath, 'a+')
                pfile.write("\nEpoch: %d/%d\n" % (i + 1, config.num_epoch))
                pfile.write("lv1 - train: %.3f\n" % train_accur1)
                pfile.write("lv2 - train: %.3f\n" % train_accur2)
                pfile.write("lv3 - train: %.3f\n" % train_accur3)
                pfile.close()

                ### if validation accuracy decreased, decrease learning rate ###
                count_epoch += 1
                if (test_accur1 < pre_val and test_accur2 < pre_val2
                        and test_accur3 < pre_val3):
                    count += 1
                if count >= 3 and num_change < 4 and count_epoch > 30:
                    trainModel.lr /= 10
                    trainModel.lr2 /= 10
                    trainModel.lr3 /= 10
                    print('lv-1 learning rate: %g' % trainModel.lr)
                    print('lv-2 learning rate: %g' % trainModel.lr2)
                    print('lv-3 learning rate: %g' % trainModel.lr3)
                    pfile = open(savepath, 'a+')
                    pfile.write('\nChange Learning Rate\n')
                    pfile.write('lv-1 learning rate: %g\n' % trainModel.lr)
                    pfile.write('lv-2 learning rate: %g\n' % trainModel.lr2)
                    pfile.write('lv-3 learning rate: %g\n' % trainModel.lr3)
                    pfile.close()
                    num_change += 1
                    count = 0
                    count_epoch = 0
                pre_val = test_accur1
                pre_val2 = test_accur2
                pre_val3 = test_accur3

                print("lv1 - test accur: %.3f" % test_accur1)
                print("lv2 - test accur: %.3f" % test_accur2)
                print("lv3 - test accur: %.3f" % test_accur3)
                pfile = open(savepath, 'a+')
                pfile.write("lv1 - test accur: %.3f\n" % test_accur1)
                pfile.write("lv2 - test accur: %.3f\n" % test_accur2)
                pfile.write("lv3 - test accur: %.3f\n" % test_accur3)
                pfile.close()

                if (test_accur1 > max1):
                    max1 = test_accur1
                if (test_accur2 > max2):
                    max2 = test_accur2
                if (test_accur3 > max3):
                    max3 = test_accur3
                print("max: %.3f / %.3f / %.3f\n" % (max1, max2, max3))