def test(data, lookBack, epoch, lr, batchSize, method, modelPath):

    # Normalize the data to the [0, 1] range
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(data)

    # Split the series into samples; supports RNN-style or flat sample shapes
    trainData, testData = divideTrainTest(dataset)

    flag = True
    trainX, trainY = createSamples(trainData, lookBack, RNN=flag)
    testX, testY = createSamples(testData, lookBack, RNN=flag)
    print("testX shape:", testX.shape)
    print("testy shape:", testY.shape)
    print("trainX shape:", trainX.shape)
    print("trainy shape:", trainY.shape)

    train(trainX,
          trainY,
          epoch=epoch,
          lr=lr,
          batchSize=batchSize,
          modelPath=modelPath,
          lookBack=lookBack,
          method=method)

    testPred = predict(testX, modelPath)
    trainPred = predict(trainX, modelPath)
    print("testPred shape:", testPred.shape)
    print("trainPred shape:", trainPred.shape)

    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)

    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    RMSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", RMSE)
    # MAPE = eval.calcMAPE(testY, testPred)
    # print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    return trainPred, testPred, MAE, RMSE, SMAPE
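
A minimal usage sketch for the driver above; the CSV path, column name, and hyperparameter values are illustrative assumptions rather than part of the original example:

# Hypothetical usage: load a univariate series as an (N, 1) array and run the full train/evaluate cycle
import pandas as pd

series = pd.read_csv("series.csv")[["value"]].values.astype("float32")
trainPred, testPred, mae, rmse, smape = test(
    series, lookBack=24, epoch=20, lr=1e-3,
    batchSize=32, method="RNN", modelPath="./model.pt")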
Example 2
def main():
    
    args = argument_parser().parse_args()
    random.seed(args.seed)

    # Select data set
    if args.dataset == "Heart":
        feats = 13
    elif args.dataset == "Wine":
        feats = 12
    elif args.dataset == "Abalone":
        feats = 8
    elif args.dataset == "Telescope":
        feats = 10
    else:
        raise ValueError("Dataset argument must be one of: Wine, Abalone, Telescope, Heart")

    
    # Initialize Chameleon component
    if args.permuter:
        print("Initializing Chameleon")
        mEnc = Chameleon(input_shape=[args.inner_batch, feats],
                         activation=tf.nn.relu,
                         permMatrix=args.perm_matrix,
                         regC=args.reg_columns,
                         num_filter=args.num_filter,
                         num_conv=args.num_conv,
                         num_neurons=args.num_neurons,
                         conv_act=args.conv_act,
                         name_suf="Perm")
        mEnc.build()
        model_input = mEnc.out
    else:
        mEnc = None
        model_input = None

    # Initialize base model ŷ for reptile and scratch training
    repModel = BaseModel(size=feats, layers=2, num_classes=2, num_neurons=64,
                         input_ph=model_input, name_suf="RepMod")
    evalModel = BaseModel(size=feats, layers=2, num_classes=2, num_neurons=64,
                          input_ph=None, name_suf="EvalMod")

    with tf.Session() as sess:
        
        print('Training...')
        try:
            train(sess, repModel, evalModel, mEnc, args.checkpoint, **train_kwargs(args))
        except TypeError:
            print("-----------------------------------------------------------------")
            raise
Example 3
def run(model_list, mode, **kwargs):
    records = {}
    results_path = get_path('data') + '/results.csv'
    clean_data_path = get_path('data') + '/clean_data.csv'
    print("TRAINING : {}".format(mode))
    for model in model_list:
        print("Current Model : {}".format(model))
        score, duration = train(clf=model, data=clean_data_path, name="{}_{}".format(model[0], mode), **kwargs)
        records.update({
            'date': datetime.now(),
            'f1': score,
            'mode': mode,
            'duration': duration,
            'model_name': model[0],
        })
        print("{0} took {1} seconds.".format(model, duration))
        with open(results_path, 'a') as f:
            w = csv.DictWriter(f, records.keys())
            w.writerow(records)
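
A hedged usage sketch for run above; the assumption that each model_list entry is a (name, estimator) pair is inferred from the model[0] lookup, and the concrete estimators and mode string are illustrative only:

# Illustrative call; the (name, estimator) pairs below are assumptions, not part of the original example
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC

models = [("rf", RandomForestClassifier()), ("svm", SVC())]
run(models, mode="baseline")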
Example 4
def finetune(opt): 
    start_time = time.time()
    if opt.T is not None:
        opt.niter = opt.T

    txtimg_finetune, txtlabel_finetune, txtimg_test, txtlabel_test = load_dataset(opt.test_dataset)

    # set up all test videos: finetune on 85% of each video, then test on the remaining 15% and compute SSIM and related metrics
    checkpoint_names = []
    # copy checkpoints_dir 8 times so each test video gets its own fresh checkpoint directory
    for i in range(8):
        ckpt_i = opt.exp_root_dir + "/checkpoints/test_" + str(i+1)
        os.makedirs(ckpt_i, exist_ok=True)
        checkpoint_names.append(ckpt_i)
        # create viz_directories
        os.makedirs(opt.exp_root_dir + "/viz/" + str(i+1), exist_ok=True)
        os.makedirs(opt.exp_root_dir + "/viz/"+ str(i+1) + "/generated/", exist_ok= True)


    scores = np.zeros((len(txtlabel_finetune), 3))

    os.makedirs(opt.exp_root_dir + "/results", exist_ok=True)
    orig_batch_size = opt.batchSize
    orig_epoch = opt.which_epoch
    # for each video
    for i in range(opt.start_FT_vid-1, len(txtlabel_finetune)):
        print("---Finetuning Video " + str(i))
        # finetuning --continue training option, batchSize 1 for divisibility purposes
        # change options for training
        opt.txtfile_label = txtlabel_finetune[i]
        opt.txtfile_img = txtimg_finetune[i]
        opt.checkpoints_dir = checkpoint_names[i]
        opt.isTrain = True
        opt.batchSize = orig_batch_size
        opt.which_epoch = orig_epoch
        train(opt)
        time.sleep(5)
        # Inference
        print("---Testing Video " + str(i))
        opt.isTrain = False
        opt.which_epoch = 'latest'
        opt.results_dir = opt.exp_root_dir + "/results/test_" + str(i+1)
        opt.txtfile_label = txtlabel_test[i]
        opt.txtfile_img = txtimg_test[i]
        opt.viz_dir = opt.exp_root_dir + "/viz/" + str(i+1) + "/"
        opt.viz_dir_gen = opt.exp_root_dir + "/viz/"+ str(i+1) + "/generated/"
        scores[i] = test(opt)
        print(scores[i])


    # avg scores across all videos
    
    mean_scores = scores.mean(axis=0)
    dic = {"scores matrix": scores, "mean": mean_scores}
    with open(opt.exp_root_dir + '/results.pkl', 'wb') as f:
        pickle.dump(dic, f)
    end = time.time()
    print("Execution Time: " + str(end-start_time))
    print("Scores matrix: ")
    print(scores)
    print("Score Means: SSIM, PSNR, MSE")
    print(mean_scores)
    return mean_scores
Example 5
def _train(args):
    from code.train import train
    train(args.model_file, args.train_file, args.ud_version,
          args.ignore_forms, args.ignore_lemmas, args.ignore_morph,
          args.epochs, args.batch_size, args.dev_file, args.keep,
          args.forms_word2vec, args.lemmas_word2vec)
Example 6
def main():

    args = argument_parser().parse_args()
    random.seed(args.seed)

    print("########## argument sheet ########################################")
    for arg in vars(args):
        print(f"#{arg:>15}  :  {str(getattr(args, arg))} ")
    print("##################################################################")

    # Select data set
    dataset = genFewShot(os.path.join(args.data_dir, args.dataset))
    testFeats = list(
        dataset.setFeatures(ast.literal_eval(args.num_test_features),
                            ratio=args.test_feat_ratio))
    dataset.splitTrainVal(.25)
    # Creating fixed set of test tasks
    dataset.generateDataset(dataset.totalLabels,
                            ast.literal_eval(args.min_feats),
                            ast.literal_eval(args.max_feats),
                            args.inner_batch,
                            args.inner_batch,
                            args.meta_batch,
                            number_of_mbs=100,
                            test=True,
                            oracle=True)

    feats = len(dataset.train_f)

    if not dataset.special:
        print(
            "###################################### Reptile Oracle ####################################"
        )
        # Evaluate Reptile only with already realigned tasks
        tf.reset_default_graph()
        mEnc = None
        model_input = None
        oracle = True
        feats = dataset.totalFeatures

        repModel = BaseModel(size=feats,
                             layers=ast.literal_eval(args.base_layers),
                             num_classes=dataset.totalLabels,
                             input_ph=model_input,
                             name_suf="RepMod")
        with tf.Session() as sess:
            print('Training...')

            train(sess,
                  repModel,
                  None,
                  mEnc,
                  args.checkpoint,
                  dataset,
                  oracle,
                  False,
                  **train_kwargs(args),
                  name_affix="Reptile_Oracle")

    #### Train and evaluate Chameleon + Reptile
    print(
        "############################### Chameleon + Reptile #####################################"
    )
    # Create Chameleon component
    tf.reset_default_graph()
    print("Initializing Chameleon")
    mEnc = Chameleon(num_instances=args.inner_batch * dataset.totalLabels,
                     maxK=feats,
                     activation=tf.nn.relu,
                     name_suf="Perm",
                     conv_layers=ast.literal_eval(args.conv_layers))
    mEnc.build()
    model_input = mEnc.out
    oracle = False

    # Initialize base model ŷ for reptile and scratch training
    repModel = BaseModel(size=feats,
                         layers=ast.literal_eval(args.base_layers),
                         num_classes=dataset.totalLabels,
                         input_ph=model_input,
                         name_suf="RepMod")
    evalModel = BaseModel(size=feats,
                          layers=ast.literal_eval(args.base_layers),
                          num_classes=dataset.totalLabels,
                          input_ph=None,
                          name_suf="EvalMod")

    with tf.Session() as sess:
        print('Training...')
        try:
            train(sess,
                  repModel,
                  evalModel,
                  mEnc,
                  args.checkpoint,
                  dataset,
                  oracle,
                  False,
                  **train_kwargs(args),
                  name_affix="Reptile_Chameleon")
        except TypeError:
            raise

    print(
        "####################### Untrained Chameleon + Reptile #####################################"
    )
    tf.reset_default_graph()
    # Create Chameleon component
    print("Initializing Chameleon")
    mEnc = Chameleon(num_instances=args.inner_batch * dataset.totalLabels,
                     maxK=feats,
                     activation=tf.nn.relu,
                     name_suf="Perm",
                     conv_layers=ast.literal_eval(args.conv_layers))
    mEnc.build()
    model_input = mEnc.out
    oracle = False

    # Initialize base model ŷ for reptile and scratch training
    repModel = BaseModel(size=feats,
                         layers=ast.literal_eval(args.base_layers),
                         num_classes=dataset.totalLabels,
                         input_ph=model_input,
                         name_suf="RepMod",
                         untrained=True)
    evalModel = BaseModel(size=feats,
                          layers=ast.literal_eval(args.base_layers),
                          num_classes=dataset.totalLabels,
                          input_ph=None,
                          name_suf="EvalMod")

    with tf.Session() as sess:
        print('Training...')
        try:
            param_dic = train_kwargs(args)
            param_dic['perm_epochs'] = 0
            train(sess,
                  repModel,
                  evalModel,
                  mEnc,
                  args.checkpoint,
                  dataset,
                  oracle,
                  False,
                  **param_dic,
                  name_affix="Repltile_Chameleon-Unt")
        except TypeError:
            raise

    if args.freeze:
        print(
            "######################## Frozen Chameleon + Reptile #####################################"
        )
        # Create Chameleon component
        tf.reset_default_graph()
        print("Initializing Chameleon")
        mEnc = Chameleon(num_instances=args.inner_batch * dataset.totalLabels,
                         maxK=feats,
                         activation=tf.nn.relu,
                         name_suf="Perm",
                         conv_layers=ast.literal_eval(args.conv_layers))
        mEnc.build()
        model_input = mEnc.out
        oracle = False

        # Initialize base model ŷ for reptile and scratch training
        repModel = BaseModel(size=feats,
                             layers=ast.literal_eval(args.base_layers),
                             num_classes=dataset.totalLabels,
                             input_ph=model_input,
                             name_suf="RepMod",
                             frozen=True)
        evalModel = BaseModel(size=feats,
                              layers=ast.literal_eval(args.base_layers),
                              num_classes=dataset.totalLabels,
                              input_ph=None,
                              name_suf="EvalMod")

        with tf.Session() as sess:
            print('Training...')
            try:
                train(sess,
                      repModel,
                      evalModel,
                      mEnc,
                      args.checkpoint,
                      dataset,
                      oracle,
                      args.freeze,
                      **train_kwargs(args),
                      name_affix="Reptile_Chameleon-Froz")
            except TypeError:
                raise

    print(
        "###################################### Reptile Padded ####################################"
    )
    # Evaluate Reptile only with padded tasks
    tf.reset_default_graph()
    mEnc = None
    model_input = None
    repModel = BaseModel(size=feats,
                         layers=ast.literal_eval(args.base_layers),
                         num_classes=dataset.totalLabels,
                         input_ph=model_input,
                         name_suf="RepMod")
    with tf.Session() as sess:
        print('Training...')
        try:
            train(sess,
                  repModel,
                  None,
                  mEnc,
                  args.checkpoint,
                  dataset,
                  oracle,
                  False,
                  **train_kwargs(args),
                  name_affix="Reptile_Pad")
        except TypeError:
            raise
Example 7
def meta_train(opt):
    opt.isTrain = True
    # will load pretrained model if --load_pretrain is set to something
    model = create_model(opt)
    G_dict, D_dict = model.module.return_dicts()
    Pre_D_dict = deepcopy(D_dict)
    writer = tf.summary.FileWriter(
        os.path.join(opt.exp_root_dir, "meta_loss_log"))

    # Reptile training loop
    # will only load the model initial_weights
    opt.load_pretrain = ""
    txtimg_train, txtlabel_train = load_train_dataset(opt.train_dataset)
    checkpoints_dir = opt.checkpoints_dir
    old_opt = deepcopy(opt)
    old_exp_root = opt.exp_root_dir
    for iteration in range(opt.start_meta_iter, opt.meta_iter):
        opt = deepcopy(old_opt)
        G_dict_before = deepcopy(G_dict)
        D_dict_before = deepcopy(D_dict)

        # Generate task
        idx = np.random.randint(len(txtlabel_train))
        torch.cuda.set_device(0)
        opt.init_weights = True
        opt.txtfile_label = txtlabel_train[idx]
        opt.txtfile_img = txtimg_train[idx]
        opt.checkpoints_dir = old_exp_root + "/checkpoints_" + str(
            iteration) + "/"
        os.makedirs(opt.checkpoints_dir, exist_ok=True)
        opt.isTrain = True
        opt.load_pretrain = ""
        opt.G_dict = G_dict
        if opt.only_generator:
            print("--loaded pretrained discriminator")
            opt.D_dict = Pre_D_dict
        else:
            opt.D_dict = D_dict

        # Pass init_weights so the latest.pth weights are loaded as the initial weights,
        # and pass the state dicts directly so they do not have to be saved to disk first
        model, loss_dict, total_loss_dict = train(opt)

        plot_current_errors(writer, loss_dict, iteration, total_loss_dict)
        G_dict, D_dict = model.module.return_dicts()  # grab dictionaries
        opt.epsilon = opt.epsilon * (1 - iteration / opt.meta_iter)  # linear schedule
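        # Reptile outer update (the dict comprehensions below), applied per parameter to both G and D:
        #   theta <- theta_before + epsilon * (theta_after - theta_before)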

        G_dict = {
            name: G_dict_before[name] +
            (G_dict[name] - G_dict_before[name]) * opt.epsilon
            for name in G_dict_before
        }
        D_dict = {
            name: D_dict_before[name] +
            (D_dict[name] - D_dict_before[name]) * opt.epsilon
            for name in D_dict_before
        }

        if iteration % opt.save_meta_iter == 0 and iteration != 0:
            save_meta_weights(checkpoints_dir, G_dict, D_dict, 'latest')

        if iteration % opt.test_meta_iter == 0 and iteration != 0:
            save_meta_weights(checkpoints_dir, G_dict, D_dict, str(iteration))
            # testing
            opt.init_weights = False
            opt.load_pretrain = checkpoints_dir
            opt.which_epoch = 'latest'
            opt.mode = 'finetune'
            opt.exp_root_dir = opt.exp_root_dir + "/finetune_" + str(iteration)
            os.makedirs(opt.exp_root_dir, exist_ok=True)
            mean_scores = finetune(opt)
            plot_testing(writer, mean_scores, iteration)

    save_meta_weights(checkpoints_dir, G_dict, D_dict, 'latest')
Example 8
def main(opt):
    # creating root directory for experiment
    opt.exp_root_dir = os.path.join("data-meta/experiments/", opt.name)
    try:
        os.makedirs(opt.exp_root_dir)
    except FileExistsError:
        check_if_delete(opt)
    # test modes may point at directories that already contain checkpoints, so skip creating a fresh one
    if opt.mode != 'test_checkpoints' and opt.mode != 'test-one':
        opt.checkpoints_dir = os.path.join(opt.exp_root_dir, "checkpoints")
        os.makedirs(opt.checkpoints_dir, exist_ok=True)
    make_opt_txt(opt)

    # mode specific launching
    if opt.mode == 'train':
        print("--Training " + opt.exp_root_dir)
        A, B = load_train_dataset(opt.train_dataset)
        opt.isTrain = True
        opt.txtfile_label = B[0]
        opt.txtfile_img = A[0]
        train(opt)
        print("[Launcher] Finished Training " + opt.name)
    elif opt.mode == "meta-train":
        if opt.T is not None:
            opt.niter = opt.T
        meta_train(opt)
    elif opt.mode == 'finetune':
        finetune(opt)
    elif opt.mode == 'test':
        checkpoints_other = opt.checkpoints_dir
        for i in range(8):
            os.makedirs(opt.exp_root_dir + f"/viz/{i+1}/generated/",
                        exist_ok=True)
        # Inference
        txtimg_finetune, txtlabel_finetune, txtimg_test, txtlabel_test = load_dataset(
            opt.test_dataset)
        scores = np.zeros((len(txtlabel_finetune), 3))
        os.makedirs(opt.exp_root_dir + "/results", exist_ok=True)
        for i in range(opt.start_FT_vid - 1, 8):
            print("---Testing Video " + str(i))
            opt.checkpoints_dir = os.path.join(checkpoints_other,
                                               f"test_{i+1}")
            opt.isTrain = False
            opt.results_dir = opt.exp_root_dir + f"/results/test_{i+1}"
            opt.txtfile_label = txtlabel_test[i]
            opt.txtfile_img = txtimg_test[i]
            opt.viz_dir = opt.exp_root_dir + f"/viz/{i+1}/"
            opt.viz_dir_gen = opt.exp_root_dir + f"/viz/{i+1}/generated/"
            scores[i] = test(opt)
    elif opt.mode == 'test_checkpoints':
        run_test(opt)
    elif opt.mode == 'test_all_list':
        """
        Test the weights at selected epochs to observe finetuning progress
        """
        for j in [10, 20, 30, 40, 50, 80, 100, 200]:
            txtimg_finetune, txtlabel_finetune, txtimg_test, txtlabel_test = load_dataset(
                opt.test_dataset)
            scores = np.zeros((len(txtlabel_finetune), 3))
            os.makedirs(opt.exp_root_dir + "/results", exist_ok=True)
            i = opt.one_vid
            os.makedirs(opt.exp_root_dir + f"/{j}/viz/{i}", exist_ok=True)
            os.makedirs(opt.exp_root_dir + f"/{j}/viz/{i}/generated/",
                        exist_ok=True)
            print(f"---Testing Video " + str(opt.one_vid) + f"\t At epoch {j}")
            opt.which_epoch = j
            opt.isTrain = False
            opt.results_dir = opt.exp_root_dir + f"/results/test_{i}"
            opt.txtfile_label = txtlabel_test[i - 1]
            opt.txtfile_img = txtimg_test[i - 1]
            opt.viz_dir = opt.exp_root_dir + f"/{j}/viz/{i}/"
            opt.viz_dir_gen = opt.exp_root_dir + f"/{j}/viz/{i}/generated/"
            scores[i] = test(opt)
    elif opt.mode == 'test_theta_init':
        # take arbitrary weights, create directories for testing them, and save results to the same files for comparison
        for i in range(8):
            os.makedirs(opt.exp_root_dir + f"/viz/{i+1}/generated/",
                        exist_ok=True)
        if opt.one_vid == 0:
            # Inference
            txtimg_finetune, txtlabel_finetune, txtimg_test, txtlabel_test = load_dataset(
                opt.test_dataset)
            scores = np.zeros((len(txtlabel_finetune), 3))
            os.makedirs(opt.exp_root_dir + "/results", exist_ok=True)
            for i in range(opt.start_FT_vid - 1, 8):
                print(f"---Testing Video {i+1}")
                opt.isTrain = False
                opt.results_dir = opt.exp_root_dir + f"/results/test_{i+1}"
                opt.txtfile_label = txtlabel_test[i]
                opt.txtfile_img = txtimg_test[i]
                opt.viz_dir = opt.exp_root_dir + f"/viz/{i+1}/"
                opt.viz_dir_gen = opt.exp_root_dir + f"/viz/{i+1}/generated/"
                scores[i] = test(opt)
        else:
            txtimg_finetune, txtlabel_finetune, txtimg_test, txtlabel_test = load_dataset(
                opt.test_dataset)
            scores = np.zeros((len(txtlabel_finetune), 3))
            os.makedirs(opt.exp_root_dir + "/results", exist_ok=True)
            i = opt.one_vid
            print(f"---Testing Video {opt.one_vid+1}")
            opt.isTrain = False
            opt.results_dir = opt.exp_root_dir + f"/results/test_{i}"
            opt.txtfile_label = txtlabel_test[i - 1]
            opt.txtfile_img = txtimg_test[i - 1]
            opt.viz_dir = opt.exp_root_dir + f"/viz/{i}/"
            opt.viz_dir_gen = opt.exp_root_dir + f"/viz/{i}/generated/"
            scores[i] = test(opt)
    else:
        print("not a valid mode, quitting")
Example 9
from code.train import train
from opt import Options

if __name__ == "__main__":
    opt = Options().parse()
    train(opt, opt.model_type)