# --- Command-line options ------------------------------------------------
# NOTE(review): `parser` is constructed above this chunk — confirm it is an
# argparse.ArgumentParser created earlier in the file.
parser.add_argument("--minibatch", dest="minibatch", type=int, default=30)
parser.add_argument("--optimizer", dest="optimizer", type=str, default="adagrad")
parser.add_argument("--epochs", dest="epochs", type=int, default=20)
parser.add_argument("--step", dest="step", type=float, default=0.01)
parser.add_argument("--hiddenDim", dest="hiddenDim", type=int, default=50)
parser.add_argument("--lstmDim", dest="lstmDim", type=int, default=30)
parser.add_argument("--task", dest="task", type=str, default=None)
args = parser.parse_args()

# --- Dataset loading -----------------------------------------------------
print ("Loading data...")

# Resolve data/sick relative to this script so it works from any CWD.
base_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(base_dir, "data")
sick_dir = os.path.join(data_dir, "sick")

# Each split yields two padded sentence matrices (X1, X2), their masks,
# gold labels, gold scores, and precomputed score targets.
X1_train, X1_mask_train, X2_train, X2_mask_train, Y_labels_train, Y_scores_train, Y_scores_pred_train = read_sequence_dataset(
    sick_dir, "train"
)
X1_dev, X1_mask_dev, X2_dev, X2_mask_dev, Y_labels_dev, Y_scores_dev, Y_scores_pred_dev = read_sequence_dataset(
    sick_dir, "dev"
)
X1_test, X1_mask_test, X2_test, X2_mask_test, Y_labels_test, Y_scores_test, Y_scores_pred_test = read_sequence_dataset(
    sick_dir, "test"
)

# --- Theano symbolic inputs ---------------------------------------------
# Integer matrices hold word indices; float matrices hold masks/targets.
input1_var = T.imatrix("inputs_1")
input2_var = T.imatrix("inputs_2")
input1_mask_var = T.matrix("inputs_mask_1")
input2_mask_var = T.matrix("inputs_mask_2")
target_var = T.fmatrix("targets")

# Pretrained word embeddings shipped alongside the SICK data.
wordEmbeddings = loadWord2VecMap(os.path.join(sick_dir, "word2vec.bin"))
parser = argparse.ArgumentParser(description="Usage") parser.add_argument("--minibatch", dest="minibatch", type=int, default=30) parser.add_argument("--optimizer", dest="optimizer", type=str, default="adagrad") parser.add_argument("--epochs", dest="epochs", type=int, default=2) parser.add_argument("--step", dest="step", type=float, default=0.01) parser.add_argument("--hiddenDim", dest="hiddenDim", type=int, default=50) args = parser.parse_args() # Load the dataset print("Loading data...") base_dir = os.path.dirname(os.path.realpath(__file__)) data_dir = os.path.join(base_dir, "data") X_train, X_mask_train, Y_labels_train = read_sequence_dataset(data_dir, "train") X_dev, X_mask_dev, Y_labels_dev = read_sequence_dataset(data_dir, "dev") X_test, X_mask_test, Y_labels_test = read_sequence_dataset(data_dir, "test") input_var = T.imatrix("inputs") target_var = T.fmatrix("targets") wordEmbeddings = loadWord2VecMap(os.path.join(data_dir, "word2vec.bin")) wordEmbeddings = wordEmbeddings.astype(np.float32) train_fn, val_fn = build_network_2dconv(args, input_var, target_var, wordEmbeddings) print("Starting training...") best_val_acc = 0 best_val_pearson = 0 for epoch in range(args.epochs):
# --- Command-line options ------------------------------------------------
# NOTE(review): `parser` is constructed above this chunk — confirm it is an
# argparse.ArgumentParser created earlier in the file.
parser.add_argument("--epochs", dest="epochs", type=int, default=20)
parser.add_argument("--step", dest="step", type=float, default=0.01)
parser.add_argument("--hiddenDim", dest="hiddenDim", type=int, default=50)
parser.add_argument("--lstmDim", dest="lstmDim", type=int, default=30)
parser.add_argument("--task", dest="task", type=str, default=None)
args = parser.parse_args()

# --- Dataset loading -----------------------------------------------------
print("Loading data...")

# Resolve data/sick relative to this script so it works from any CWD.
base_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(base_dir, 'data')
sick_dir = os.path.join(data_dir, 'sick')

# Each split: two padded sentence matrices (X1, X2), their masks, gold
# labels, gold scores, and precomputed score targets.
X1_train, X1_mask_train, X2_train, X2_mask_train, Y_labels_train, Y_scores_train, Y_scores_pred_train = \
    read_sequence_dataset(sick_dir, "train")
X1_dev, X1_mask_dev, X2_dev, X2_mask_dev, Y_labels_dev, Y_scores_dev, Y_scores_pred_dev = \
    read_sequence_dataset(sick_dir, "dev")
X1_test, X1_mask_test, X2_test, X2_mask_test, Y_labels_test, Y_scores_test, Y_scores_pred_test = \
    read_sequence_dataset(sick_dir, "test")

# Pretrained embeddings, cast to float32 for the GPU-friendly graph.
# FIX: the original loaded word2vec.bin twice (once before and once after
# the dataset reads); the redundant first load has been removed.
wordEmbeddings = loadWord2VecMap(os.path.join(sick_dir, 'word2vec.bin'))
wordEmbeddings = wordEmbeddings.astype(np.float32)

# Compile training / validation functions and the probability predictor.
train_fn, val_fn, predict_proba = build_network(args, wordEmbeddings)

# --- Training loop -------------------------------------------------------
print("Starting training...")
best_val_acc = 0
best_val_pearson = 0
for epoch in range(args.epochs):
    train_err = 0  # loop body continues beyond this chunk