Exemple #1
0
def main():
    """Train the ImageQA RNN on precomputed VGG features (Python 2 script).

    Side effects: changes the process working directory, prints tensor
    shapes, and trains the model (periodic checkpointing via ``save``).
    """
    # NOTE(review): hard-coded, machine-specific data directory.
    os.chdir('/home/seonhoon/Desktop/workspace/ImageQA/data/')

    # Hyperparameters.
    n_vocab = 12047   # question-vocabulary size
    y_vocab = 430     # number of answer classes
    dim_word = 1024   # word-embedding dimension
    dim = 1024        # RNN hidden dimension
    maxlen = 60       # maximum question length in tokens
    
    # Pickled frame with columns 'q', 'a' and 'cnn_feature' — presumably
    # token-id questions, answer lists and VGG features; inferred from the
    # column accesses below, confirm against the pickle's producer.
    train=pd.read_pickle('train_vgg.pkl')



    train_x=[ q for q in train['q'] ]
    # Only the first answer of each answer list is used as the label.
    train_y=[ a[0] for a in train['a'] ]
    train_y=np.array(train_y)[:,None]
    # One-hot encode the labels over y_vocab classes.
    train_y = np_utils.to_categorical(train_y, y_vocab).astype('int32')
    # Pad/truncate questions to maxlen and build the matching mask.
    train_x , train_x_mask = prepare_data(train_x, maxlen)
    train_x_img = np.array([ img.tolist() for img in train['cnn_feature'] ]).astype('float32')

    
    print 'x :', train_x.shape
    print 'x_mask:', train_x_mask.shape
    print 'x_img:', train_x_img.shape
    print 'y : ', train_y.shape
    model = RNN(n_vocab, y_vocab, dim_word, dim)

    model.train(train_x, train_x_mask, train_x_img, train_y, batch_size=512, epoch=50, save=15)
Exemple #2
0
def main():
    """Evaluate the ImageQA RNN on the VGG-feature test set (Python 2 script).

    Prints the first predictions next to the ground truth and the
    exact-match accuracy.
    """

    # NOTE(review): hard-coded, machine-specific data directory.
    os.chdir('/home/seonhoon/Desktop/workspace/ImageQA/data/')

    # Hyperparameters — must match the values the model was trained with.
    n_vocab = 12047   # question-vocabulary size
    y_vocab = 430     # number of answer classes
    dim_word = 1024   # word-embedding dimension
    dim = 1024        # RNN hidden dimension
    maxlen = 60       # maximum question length in tokens
    
    test=pd.read_pickle('test_vgg.pkl')

    test_x=[ q for q in test['q'] ]
    # Keep the raw labels around for the accuracy computation below.
    test_y=[ a[0] for a in test['a'] ]
    test_y_original=test_y
    test_y=np.array(test_y)[:,None]
    # NOTE(review): unlike the training script, no .astype('int32') here.
    test_y = np_utils.to_categorical(test_y, y_vocab)
    test_x , test_x_mask = prepare_data(test_x, maxlen)
    test_x_img = [ img.tolist() for img in test['cnn_feature'] ]

    
    print 'x :', test_x.shape
    print 'x_mask:', test_x_mask.shape
    print 'y : ', test_y.shape
    model = RNN(n_vocab, y_vocab, dim_word, dim)

    pred_y = model.prediction(test_x, test_x_mask, test_x_img, test_y, batch_size=2048)
    
    print pred_y[:10], len(pred_y)
    print test_y_original[:10], len(test_y_original)
    
    # Exact-match accuracy of predictions against the original labels.
    correct = 0 
    for i in range(len(pred_y)):
        if pred_y[i]==test_y_original[i] : 
            correct += 1
    print 'accuracy : ', float(correct) / len(pred_y)
Exemple #3
0
print('  vocabulary size: {}'.format(vocab_size))


###############################################################################
# 
# MODEL SETUP
#
###############################################################################

# NOTE ==============================================
# This is where your model code will be called. You may modify this code
# if required for your implementation, but it should not typically be necessary,
# and you must let the TAs know if you do so.
# Construct the model selected on the command line (args.model).
if args.model == 'RNN':
    model = RNN(emb_size=args.emb_size, hidden_size=args.hidden_size, 
                seq_len=args.seq_len, batch_size=args.batch_size,
                vocab_size=vocab_size, num_layers=args.num_layers, 
                dp_keep_prob=args.dp_keep_prob) 
elif args.model == 'GRU':
    model = GRU(emb_size=args.emb_size, hidden_size=args.hidden_size, 
                seq_len=args.seq_len, batch_size=args.batch_size,
                vocab_size=vocab_size, num_layers=args.num_layers, 
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'TRANSFORMER':
    if args.debug:  # use a very small model
        model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2)
    else:
        # Note that we're using num_layers and hidden_size to mean slightly 
        # different things here than in the RNNs.
        # Also, the Transformer also has other hyperparameters 
        # (such as the number of attention heads) which can change it's behavior.
        # NOTE(review): this excerpt is truncated mid-statement below.
        model = TRANSFORMER(vocab_size=vocab_size, n_units=args.hidden_size, 
Exemple #4
0
def main():
    """CLI entry point for audio-classification training.

    Parses command-line options, builds features for the selected
    language and feature type, trains either a convolutional or an RNN
    model with balanced class weights, and saves loss/accuracy plots
    under ``plots/``.
    """
    parse = argparse.ArgumentParser(
        description='Audio Classification Training')
    parse.add_argument('class_name',
                       type=str,
                       default='Gender',
                       help='The class name to train your model')
    parse.add_argument('model', choices=['conv', 'rnn'])
    parse.add_argument('features', choices=['stft', 'mfcc', 'cwt'])
    parse.add_argument('lng', choices=['cnh', 'eng'])
    parse.add_argument('-optimizer', choices=['sgd', 'adam'], default='adam')
    parse.add_argument('-cuda', action='store_true')
    # parse.add_argument('-clean', type=bool, default=False, help='To pre processing, remove low frequencies based on
    # threshold value')
    # BUG FIX: dropout is a fraction; `type=int` would reject or truncate
    # any user-supplied value (e.g. int('0.5') raises).
    parse.add_argument('-dropout', type=float, default=0.5)
    parse.add_argument('-epochs',
                       type=int,
                       default=10,
                       help='Number of epochs to train for')
    # BUG FIX: threshold is fractional (default 0.005); `type=int` clashed
    # with the default and broke user-supplied values.
    parse.add_argument('-threshold',
                       type=float,
                       default=0.005,
                       help='Remove low frequencies below to')
    parse.add_argument('-nfilt', type=int, default=26, help='Number of filter')
    parse.add_argument('-nfeat', type=int, default=13)
    parse.add_argument('-nfft', type=int, default=1103)
    parse.add_argument('-rate',
                       type=int,
                       default=16000,
                       help='Down sample rate to')
    # BUG FIX: argparse `type=bool` treats ANY non-empty string as True
    # ("-report False" still enabled the report); a store_true flag gives
    # the intended False default and correct toggling.
    parse.add_argument('-report', action='store_true', help='Plot report')

    opt = parse.parse_args()
    # device = torch.device("cuda:0" if torch.cuda.is_available() and opt.cuda else "cpu")
    # print(device)

    # Default dataset paths (cnh); switched below when lng == "eng".
    mod = None
    path_short = "cnh/"
    path_full = "data/cnh/"
    csv = "label_cnh.csv"

    if opt.lng == "eng":
        path_short = "eng/"
        path_full = "data/eng/"
        csv = "label_eng.csv"

    # if opt.clean:
    #     df_ = read_data()
    #     clean(df_, opt)

    df = read_data(path=path_short, csv=csv)
    x, y = build_features(df, opt=opt, path=path_full)
    # Conv nets get a trailing channel axis; RNNs take (timesteps, features).
    input_shape = (x.shape[1], x.shape[2],
                   1) if opt.model == 'conv' else (x.shape[1], x.shape[2])
    # Balance classes: weight inversely to class frequency of the argmax labels.
    y_flat = np.argmax(y, axis=1)
    weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
    class_weight = dict(enumerate(weight.flatten()))

    if opt.report:
        x_, x__, fft_, fbanks, mfccs, stf_f = features(df=df,
                                                       opt=opt,
                                                       path=path_full)
        distribution_plt(group(df, opt.class_name))
        signal_plt(x_, 'Time Series', 'tm')
        signal_plt(x__, 'Time Series ENVELOPE', 'tmenv')

    if opt.model == 'rnn':
        mod = RNN(input_shape, opt.optimizer)
    elif opt.model == 'conv':
        mod = CONV(input_shape, opt.optimizer)

    # if "cuda" in mod.device.type: torch.cuda.empty_cache()
    history = mod.fit(x,
                      y,
                      validation_split=0.50,
                      batch_size=4,
                      epochs=opt.epochs,
                      class_weight=class_weight)

    # Persist training curves (loss, then accuracy) to plots/.
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Loss model')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.savefig('plots/loss_loss.png')
    plt.show()

    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Accuracy model')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.savefig('plots/acc_loss.png')
    plt.show()
###############################################################################
#
# MODEL SETUP
#
###############################################################################

# NOTE ==============================================
# This is where your model code will be called. You may modify this code
# if required for your implementation, but it should not typically be necessary,
# and you must let the TAs know if you do so.
# Construct the model selected on the command line (args.model).
if args.model == 'RNN':
    model = RNN(emb_size=args.emb_size,
                hidden_size=args.hidden_size,
                seq_len=args.seq_len,
                batch_size=args.batch_size,
                vocab_size=vocab_size,
                num_layers=args.num_layers,
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'GRU':
    model = GRU(emb_size=args.emb_size,
                hidden_size=args.hidden_size,
                seq_len=args.seq_len,
                batch_size=args.batch_size,
                vocab_size=vocab_size,
                num_layers=args.num_layers,
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'TRANSFORMER':
    if args.debug:  # use a very small model
        model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2)
    else:
        # NOTE(review): this excerpt is truncated here — the non-debug
        # TRANSFORMER construction is missing from this view.
Exemple #6
0
from models import RNN
from models import CNN_emotions
import models
import torch
import torch.nn.functional as F
from torch.utils.model_zoo import load_url
from base64 import b64encode
import age_class

# Pretrained Inception-v3 weights from the PyTorch model zoo (CNN encoder).
inception_url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'

# getCNN() is defined elsewhere in the project; weights are loaded on CPU.
cnn = getCNN()
cnn.load_state_dict(load_url(inception_url, map_location=torch.device('cpu')))
cnn = cnn.train(False)  # train(False) == eval mode; returns the module itself

# Caption RNN with locally stored weights.
rnn = RNN()
rnn.load_state_dict(torch.load('net_param.pt', torch.device('cpu')))
rnn = rnn.train(False)

# Emotion-classification CNN with locally stored weights.
emotions = CNN_emotions()
emotions.load_state_dict(torch.load('emotions.pth', torch.device('cpu')))
emotions = emotions.train(False)

# NOTE(review): attribute name 'vacabulary' (sic) comes from the models
# module — do not "fix" the spelling here without changing it there too.
vocabulary = models.vacabulary

batch_of_captions_into_matrix = models.batch_of_captions_into_matrix
# Flask is presumably imported elsewhere in the full file — verify.
app = Flask(__name__)

# Emotion-index -> label mapping (excerpt truncated below).
tags = {
    0: 'Angry',
    1: 'Disgust',
Exemple #7
0
# Generate text samples from saved RNN and GRU language models.
# seqLen is the training-time sequence length used to build the models;
# seq_len holds the two generation lengths tried per model.
model_types=['RNN','GRU']
numLayers=[2,2]
seqLen=[35,35]
seq_len=[35,70]
samples=10          # number of parallel samples to generate
path=['best_params_RNN.pt','best_params_GRU.pt']




# embSize, hiddenSize, batchSize, dropOut, vocab_size and device are
# presumably defined earlier in the full file — confirm before reuse.
for m in range(len(model_types)):
	for s in range(len(seq_len)):
		print('Processing model: '+model_types[m]+' seq_len: '+str(seq_len[s])+'\n')
		if model_types[m]=='RNN':
			model = RNN(emb_size=embSize[m], hidden_size=hiddenSize[m], 
					seq_len=seqLen[m], batch_size=batchSize[m],
					vocab_size=vocab_size, num_layers=numLayers[m], 
					dp_keep_prob=dropOut[m])
		else:
			model =GRU(emb_size=embSize[m], hidden_size=hiddenSize[m], 
					seq_len=seqLen[m], batch_size=batchSize[m],
					vocab_size=vocab_size, num_layers=numLayers[m], 
					dp_keep_prob=dropOut[m])
		model.load_state_dict(torch.load(path[m]))
		model = model.to(device)
		# Zero initial hidden state: (num_layers, samples, hidden_size).
		hidden = nn.Parameter(torch.zeros(numLayers[m],samples,hiddenSize[m])).to(device)
		# Uniform weights over 10000 tokens, then draw `samples` start tokens.
		# NOTE(review): shadows the builtin `input`.
		input=torch.ones(10000)*1/1000
		input=torch.multinomial(input,samples).to(device)
		model.eval()
		output=model.generate(input, hidden, seq_len[s])
		print('Saving generated samples')
		# Output file per (model, generation length); excerpt truncated after this.
		fid=open(model_types[m]+'_' +str(seq_len[s])+'.txt','w')
Exemple #8
0
    embedding)

print(embeddings)
"""
GET THE MODEL
"""
# Build the logits graph for the selected architecture (MODEL is set
# elsewhere in the full file).
if MODEL == "CNN":
    logits = CNN.get_model(X,
                           W=embeddings,
                           is_training=is_training,
                           filters=filters,
                           n_classes=n_classes)
elif MODEL == "RNN":
    logits = RNN.get_model(X,
                           W=embeddings,
                           dropout_keep_prob=dropout_keep_prob,
                           hidden_size=HIDDEN_UNITS,
                           n_classes=n_classes,
                           num_layers=NUM_LAYERS)

print(logits)
softmax = tf.nn.softmax(logits)

# Total trainable parameter count, for logging.
num_params = np.sum(
    [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])

print("{} params to train".format(num_params))

# Training op and loss from the project's train_ops helper.
train_op, loss = train_ops.train_op(logits,
                                    y=y,
                                    learning_rate=lr,
                                    optimizer=OPTIMIZER)

if __name__ == '__main__':
    # 1. Data loading: IMDB train/test splits wrapped in DataLoaders.
    # (trainloader/testloader are module-level and may be read as globals
    # by train()/evaluate_model() — presumably; verify before renaming.)
    print('data load start')
    train_texts, train_labels = read_imdb_split(train_data_path)
    train_data = IMDBDataset(train_texts, train_labels, word2idx)
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, collate_fn=collate_imdb,
                                              shuffle=True)
    test_texts, test_labels = read_imdb_split(test_data_path)
    test_data = IMDBDataset(test_texts, test_labels, word2idx, attack_label=1)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, collate_fn=collate_imdb,
                                             shuffle=False)
    print('data load end')

    # Fix all RNG seeds for reproducibility.
    random.seed(11)
    np.random.seed(11)
    torch.manual_seed(11)

    # 2. Train a fresh model (mode == 1) or evaluate a saved baseline.
    criterion = nn.CrossEntropyLoss()
    if mode == 1:
        model = RNN(vocab_size=vocab_size, embedding_dim=300, hidden_dim=300, output_dim=2).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=0.0004)
        train(model)
    else:
        baseline_model = RNN(vocab_size=vocab_size, embedding_dim=300, hidden_dim=300, output_dim=2).to(device)
        baseline_model.load_state_dict(torch.load(output_model_path))
        baseline_model.eval()  # eval mode: dropout is no longer applied randomly
        evaluate_model(baseline_model, testloader, criterion)
Exemple #10
0
def main():
    """Restore a trained diacritics model and beam-search-decode a prime string.

    Supports three model types: 't-d', 't-d-s2s' (TransformerDecoder) and
    'rnn'. Prints the decoded result(s) to stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocab_path',
                        type=str,
                        default='ckpt_blog_td/vocab.txt',
                        help='the location of checkpointing files')
    parser.add_argument('--ckpt_path',
                        type=str,
                        default='ckpt_blog_td2',
                        help='the location of checkpointing files')
    parser.add_argument('--model',
                        type=str,
                        default='t-d-s2s',
                        help='model types: t-d, t-d-s2s or rnn')
    parser.add_argument('--beam_size',
                        type=int,
                        default=3,
                        help='beam search size')
    parser.add_argument('--prime',
                        type=str,
                        default='chia tay me di buc ca minh')
    parser.add_argument('--saved_args_path',
                        type=str,
                        default="./ckpt_blog_td2/args.pkl")

    args = parser.parse_args()
    # Restore the hyperparameters the checkpoint was trained with.
    with open(args.saved_args_path, 'rb') as f:
        saved_args = cPickle.load(f)
    word2idx, idx2word = load_vocab(args.vocab_path)
    # Map each unaccented token to every accented variant seen in the vocab
    # (special tokens starting with "<" are kept verbatim).
    accent_vocab = dict()
    for token, idx in word2idx.items():
        raw_token = tone_utils.clear_all_marks(
            token) if token[0] != "<" else token
        if raw_token not in accent_vocab:
            accent_vocab[raw_token] = [token]
        else:
            curr = accent_vocab[raw_token]
            if token not in curr:
                curr.append(token)

    unk_idx = word2idx["<unk>"]
    if args.model == "t-d" or args.model == "t-d-s2s":
        # quick fix
        model = TransformerDecoder(False, saved_args)
    else:
        model = RNN(False, saved_args)

    # Transformer variants own their graph; the RNN uses the default graph.
    with tf.Session(
            graph=model.graph if args.model != "rnn" else None) as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(args.ckpt_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            words = basic_tokenizer(args.prime)
            if args.model == "t-d":
                # Seed the decoder with <sos> followed by padding, take the
                # initial token distribution, then beam-search.
                sos_idx = word2idx["<sos>"]
                pad_idx = word2idx["<pad>"]
                init_state = np.full(shape=(saved_args.maxlen + 1),
                                     fill_value=pad_idx)
                init_state[0] = sos_idx
                init_probs = sess.run(
                    tf.nn.softmax(model.logits),
                    feed_dict={model.x: np.atleast_2d(init_state)})[0]
                paths = beamsearch_transformer(sess, model, words,
                                               args.beam_size,
                                               saved_args.maxlen, init_probs,
                                               accent_vocab, word2idx)
            elif args.model == "rnn":
                # Step the RNN once on the leading <eos> to get the first
                # distribution and state, then beam-search from there.
                x = np.zeros((1, 1))
                words = basic_tokenizer(args.prime)
                init_state = sess.run(model.cell.zero_state(1, tf.float32))
                if words[0] != "<eos>":
                    words = ["<eos>"] + words
                out_state = init_state
                x[0,
                  0] = word2idx[words[0]] if words[0] in word2idx else unk_idx
                # print(x[0,0])
                feed = {model.input_data: x, model.initial_state: out_state}
                [probs, out_state] = sess.run([model.probs, model.final_state],
                                              feed)
                paths = beamsearch_rnn(sess, model, words, args.beam_size,
                                       out_state, probs[0], accent_vocab,
                                       word2idx)
            else:
                # t-d-s2s: greedy single pass — encode the unaccented input,
                # pad to maxlen, and take the model's predictions directly.
                pad_idx = word2idx["<pad>"]
                ref = []
                for idx, token in idx2word.items():
                    cleared = clear_all_marks(token)
                    if cleared not in ref:
                        ref.append(cleared)
                words = basic_tokenizer(args.prime)
                feed_x = np.asarray([ref.index(w) for w in words])
                feed_x = np.atleast_2d(
                    np.lib.pad(feed_x, [0, saved_args.maxlen - len(feed_x)],
                               'constant',
                               constant_values=pad_idx))
                feed = {model.x: feed_x}
                paths = [sess.run(model.preds, feed_dict=feed)]
                paths[0][len(words):] = pad_idx
            # Map token ids back to words; unknown tokens fall back to the
            # corresponding input word (offset by the leading <eos> for rnn).
            result = ""
            for path in paths:
                for idx, token in enumerate(path):
                    result += idx2word[token] if token != unk_idx else words[
                        idx if args.model != "rnn" else idx + 1]
                    result += " "
                result += "\n"
            print(result)
def _load_model(emb_size, hidden_size, seq_len, batch_size, vocab_size,
                num_layers, dp_keep_prob, PATH, model_type):
    """Build an RNN or GRU language model and load trained weights.

    The first seven arguments mirror the RNN/GRU constructors; ``PATH``
    is the checkpoint file and ``model_type`` selects 'RNN' (any other
    value yields a GRU). Returns the model in eval mode, moved to the
    GPU when one is available.
    """
    # Load model (Change to RNN if you want RNN to predict)
    if model_type == 'RNN':
        model = RNN(emb_size, hidden_size, seq_len, batch_size, vocab_size,
                    num_layers, dp_keep_prob)
    else:
        model = GRU(emb_size, hidden_size, seq_len, batch_size, vocab_size,
                    num_layers, dp_keep_prob)

    if torch.cuda.is_available():
        # BUG FIX: load_state_dict() returns a NamedTuple of
        # missing/unexpected keys, not the module, so the original
        # `model.load_state_dict(...).cuda()` raised AttributeError.
        model.load_state_dict(torch.load(PATH))
        model.cuda()
    else:
        model.load_state_dict(torch.load(PATH, map_location='cpu'))
    model.eval()
    return model
Exemple #12
0
MAYBE TODO:
1. attention mechanism to the actor
2. attention mechanism in the predictor. 
option 1: convert the vector into a feature map 
option 2: use directly feature map maybe use feature maps like 5x5x256 and then apply a transformer to it
3. add debugging to see if everything works and how the agent is behaving
'''
# One TF session per component so each owns its own graph state.
vae_sess = tf.Session()
rnn_sess = tf.Session()
actor_sess = tf.Session()

# Environment wrapper plus the world-model components: VAE-GAN (vision),
# RNN (dynamics), actor (policy), tied together by MCTS planning.
env = EnvWrap(FLAGS.init_frame_skip, FLAGS.frame_skip, FLAGS.env,
              FLAGS.renderGame)
vaegan = VAEGAN.VAEGAN(vae_sess)
rnn = RNN.RNN(rnn_sess)
actor = ACTOR.ACTOR(actor_sess)
mcts = MCTS.Tree(rnn, actor)
trainer = Trainer()

#If called, train the VAEGAN AND RNN before the actor
if (FLAGS.preprocessing):
    preprocessing.run(env, vaegan, trainer, rnn)

if (FLAGS.playing):
    #Make the actor play and train VAEGAN, RNN and actor
    playing.run(env, vaegan, rnn, actor, trainer, mcts)
'''
def main():
    
    #Tran alphazero using MCTS
Exemple #13
0
    os.rename(tmp_run_path, output_path)

    return test_loss


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model_type", type=str, default="linguistic")
    args = parser.parse_args()
    set_default_tensor()

    # Select config, dataset loader and model class per modality:
    # linguistic and acoustic-lld use an RNN; spectrograms use a CNN.
    if args.model_type == "linguistic":
        cfg = LinguisticConfig()
        test_features, test_labels, val_features, val_labels, train_features, train_labels = load_linguistic_dataset(
        )
        model = RNN(cfg)
    elif args.model_type == "acoustic-lld":
        cfg = AcousticLLDConfig()
        test_features, test_labels, val_features, val_labels, train_features, train_labels = load_acoustic_features_dataset(
        )
        model = RNN(cfg)
    elif args.model_type == "acoustic-spectrogram":
        cfg = AcousticSpectrogramConfig()
        test_features, test_labels, val_features, val_labels, train_features, train_labels = load_spectrogram_dataset(
        )
        model = CNN(cfg)
    else:
        raise Exception(
            "model_type parameter has to be one of [linguistic|acoustic-lld|acoustic-spectrogram]"
        )
    """Converting model to specified hardware and format"""
Exemple #14
0
def _load_model(model_type):
    """Build an RNN or GRU language model with fixed hyperparameters and
    load its best checkpoint.

    ``model_type`` selects 'RNN' (any other value yields a GRU); each
    branch uses its own checkpoint directory. Returns the model in eval
    mode, moved to the GPU when one is available.
    """
    # Hyperparameters must match the values the checkpoints were trained with.
    emb_size = 200
    hidden_size = 1500
    seq_len = 35  # 70
    batch_size = 20
    vocab_size = 10000
    num_layers = 2
    dp_keep_prob = 0.35

    # Load model (Change to RNN if you want RNN to predict)
    if model_type == 'RNN':
        model = RNN(emb_size, hidden_size, seq_len, batch_size, vocab_size,
                    num_layers, dp_keep_prob)
        PATH = os.path.join("RNN_ADAM_0", "best_params.pt")
    else:
        model = GRU(emb_size, hidden_size, seq_len, batch_size, vocab_size,
                    num_layers, dp_keep_prob)
        PATH = os.path.join("GRU_SGD_LR_SCHEDULE_0", "best_params.pt")

    if torch.cuda.is_available():
        # BUG FIX: load_state_dict() returns a NamedTuple of
        # missing/unexpected keys, not the module, so the original
        # `model.load_state_dict(...).cuda()` raised AttributeError.
        model.load_state_dict(torch.load(PATH))
        model.cuda()
    else:
        model.load_state_dict(torch.load(PATH, map_location='cpu'))
    model.eval()
    return model
Exemple #15
0
    def __init__(self,
                 layers=[512, 256, 128, 64, 32],
                 filters=[64, 128, 256, 512],
                 MODEL="FF",
                 HIDDEN_UNITS=32,
                 NUM_LAYERS=2,
                 do_val=False,
                 OPTIMIZER='adam',
                 DEV_SPLIT=0.2,
                 NUM_EPOCH=50,
                 min_ngram=1,
                 up=5,
                 max_features=None,
                 dataset="PAN2019",
                 logger=None,
                 opts=None,
                 DEBUG=False,
                 lang="es"):
        """
        Vars
        """
        BATCH_SIZE = opts.batch_size
        logger = logger or logging.getLogger(__name__)
        # MODEL = "RNN"
        # MODEL = "CNN"
        if not DEBUG:

            ## PAN
            path = opts.tr_data + '/' + lang
            path_test = opts.i + '/' + lang
            sent = 0
            if lang == 'en': sent = 0
            if do_val:
                txt_train = opts.file_i + "/{}/truth-train.txt".format(lang)
                txt_dev = opts.file_i + "/{}/truth-dev.txt".format(lang)
                dt_train = process.PAN2019(path=path,
                                           txt=txt_train,
                                           join_all=MODEL == "FF",
                                           sentiment_id=sent)
                dt_dev = process.PAN2019(path=path,
                                         txt=txt_dev,
                                         join_all=MODEL == "FF",
                                         sentiment_id=sent)
                fnames_dev = dt_dev.fnames
                y_dev = dt_dev.y
                x_dev = dt_dev.X
                # sent_dev = dt_dev.sentiment
                del dt_dev
            else:
                txt_train = opts.file_i + "/{}/truth.txt".format(lang)
                dt_train = process.PAN2019(path=path,
                                           txt=txt_train,
                                           join_all=MODEL == "FF",
                                           sentiment_id=sent)

            dt_test = process.PAN2019_Test(path=path_test,
                                           join_all=MODEL == "FF",
                                           sentiment_id=sent)
            n_classes = 2  # bot or not bot

            # sent_train = dt_train.sentiment
            # sent_test = dt_test.sentiment

            x_train = dt_train.X
            y_train = dt_train.y
            y2_train = dt_train.y2
            print(len(x_train))
            print(len(y_train))
            print(len(y2_train))

            x_test = dt_test.X
            # y_test = dt_test.y

            fnames_train = dt_train.fnames
            fnames_test = dt_test.fnames

            labelencoder = LabelEncoder()  #set
            y_train_ = np.array(y_train).astype(str)
            # y_test_ = np.array(y_test).astype(str)
            labelencoder.fit(y_train_)
            y_train_ = labelencoder.transform(y_train_)
            # y_test_ = labelencoder.transform(y_test_)
            n_values = len(np.unique(y_train_))
            # To One hot
            y_train = to_categorical(y_train_, n_values)

            # y_test = to_categorical(y_test_, n_values)

            if max_features:

                rep = TfidfVectorizer(ngram_range=(min_ngram, up),
                                      max_features=max_features)
            else:
                rep = TfidfVectorizer(ngram_range=(min_ngram, up))

            del dt_train
            del dt_test

            logger.info("fit_transform tfidf")
            texts_rep_train = rep.fit_transform(x_train)
            logger.info("To array")
            texts_rep_train = texts_rep_train.toarray()

            logger.info("transform tfidf")
            text_test_rep = rep.transform(x_test)
            logger.info("To array")
            text_test_rep = text_test_rep.toarray()
            if do_val:
                text_dev_rep = rep.transform(x_dev)
                text_dev_rep = text_dev_rep.toarray()
                y_dev_ = np.array(y_dev).astype(str)
                y_dev_ = labelencoder.transform(y_dev_)
                y_dev = to_categorical(y_dev_, n_values)

            if MODEL == "CNN":
                num = opts.num_tweets
                texts_rep_train = texts_rep_train.reshape(
                    int(texts_rep_train.shape[0] / num), num,
                    texts_rep_train.shape[1])
                text_test_rep = text_test_rep.reshape(
                    int(text_test_rep.shape[0] / num), num,
                    text_test_rep.shape[1])

        else:
            logger.info(" --------------- DEBUG ON ------------------")
            n_classes = 2
            n_vcab = 10000
            train_data = 128
            dev_data = 50
            texts_rep_train = np.random.randn(train_data, 100, n_vcab)
            text_test_rep = np.random.randn(dev_data, 100, n_vcab)
            y_train = np.eye(n_classes)[np.random.choice(
                n_classes, train_data)]
            y_test = np.eye(n_classes)[np.random.choice(n_classes, dev_data)]

            alphabet = list(
                'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
            )
            np_alphabet = np.array(alphabet, dtype="|S1")
            fnames_train = np.random.choice(np_alphabet, [train_data])
            fnames_test = np.random.choice(np_alphabet, [dev_data])
            logger.info("Random data created")

        # print(len(sent_train))
        # print(texts_rep_train.shape)
        # texts_rep_train = np.concatenate((texts_rep_train, np.expand_dims(sentiment.append([blob.sentiment.polarity, blob.sentiment.subjectivity]),axis=1)), axis=-1)
        # text_test_rep = np.concatenate((text_test_rep, np.expand_dims(sent_test,axis=1)), axis=-1)
        # text_dev_rep = np.concatenate((text_dev_rep, np.expand_dims(sent_dev,axis=1)), axis=-1)
        # texts_rep_train = np.concatenate((texts_rep_train, sent_train), axis=-1)
        logger.info("texts_rep_train: {}".format(texts_rep_train.shape))
        logger.info("y_train: {}".format(y_train.shape))

        # X_train, X_val, X_test, y_train, y_val, y_test, MAX_SEQUENCE_LENGTH = prepare_data(
        #     dir_word_embeddings, fname_vocab, train_path, test_path, EMBEDDING_DIM,
        #     VALIDATION_SPLIT=DEV_SPLIT, MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH
        # )
        """""" """""" """""" """""" """""" """""
        # Tensorflow
        """ """""" """""" """""" """""" """""" ""

        batch_size = tf.placeholder(tf.int64, name="batch_size")
        if MODEL == "CNN":
            X = tf.placeholder(tf.float32,
                               shape=[
                                   None, texts_rep_train.shape[1],
                                   texts_rep_train.shape[2]
                               ],
                               name="X")
        else:
            X = tf.placeholder(tf.float32,
                               shape=[None, len(texts_rep_train[0])],
                               name="X")
        print(X)
        y = tf.placeholder(tf.int64, shape=[None, n_classes], name="y")
        fnames_plc = tf.placeholder(tf.string, shape=[None], name="fnames_plc")
        lr = tf.placeholder(tf.float32, shape=[], name="lr")
        is_training = tf.placeholder_with_default(False,
                                                  shape=[],
                                                  name='is_training')
        dropout_keep_prob = tf.placeholder_with_default(1.0,
                                                        shape=(),
                                                        name="dropout")
        """
        GET THE MODEL
        """
        if MODEL == "CNN":
            logits = CNN.get_model(X,
                                   is_training=is_training,
                                   filters=filters,
                                   n_classes=n_classes,
                                   tf_idf=True,
                                   logger=logger)
        elif MODEL == "RNN":
            logits = RNN.get_model(X,
                                   dropout_keep_prob,
                                   hidden_size=HIDDEN_UNITS,
                                   n_classes=n_classes,
                                   num_layers=NUM_LAYERS)
        elif MODEL == "FF":
            logits = FF.get_model(X,
                                  dropout_keep_prob,
                                  is_training=is_training,
                                  layers=layers,
                                  n_classes=n_classes)
        logger.info(logits)
        softmax = tf.nn.softmax(logits)

        num_params = np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ])

        logger.info("{} params to train".format(num_params))

        train_op, loss = train_ops.train_op(logits,
                                            y,
                                            learning_rate=lr,
                                            optimizer=OPTIMIZER)
        """"""
        """Test de embeddings"""

        train_dataset = tf.data.Dataset.from_tensor_slices(
            (X, y, fnames_plc)).batch(batch_size).shuffle(buffer_size=12)
        dev_dataset = tf.data.Dataset.from_tensor_slices(
            (X, y, fnames_plc)).batch(batch_size).shuffle(buffer_size=12)
        test_dataset = tf.data.Dataset.from_tensor_slices(
            (X, fnames_plc)).batch(batch_size).shuffle(buffer_size=12)

        train_data = (texts_rep_train, y_train, fnames_train)
        if do_val:
            dev_data = (text_dev_rep, y_dev, fnames_dev)
        test_data = (text_test_rep, fnames_test)
        print(text_test_rep.shape)
        print(len(fnames_test))

        # create a iterator of the correct shape and type
        iter = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
        iter_test = tf.data.Iterator.from_structure(test_dataset.output_types,
                                                    test_dataset.output_shapes)

        # create the initialisation operations
        train_init_op = iter.make_initializer(train_dataset)
        dev_init_op = iter.make_initializer(dev_dataset)
        test_init_op = iter_test.make_initializer(test_dataset)

        epoch_start = 0
        ## Train
        sess = tf.Session()
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        sess.run(init_g)
        sess.run(init_l)
        best_acc = 0
        for epoch in range(epoch_start, NUM_EPOCH + 1):
            sess.run(train_init_op,
                     feed_dict={
                         X: train_data[0],
                         y: train_data[1],
                         fnames_plc: train_data[2],
                         batch_size: BATCH_SIZE,
                     })

            current_batch_index = 0
            next_element = iter.get_next()
            loss_count = 0
            while True:

                try:
                    data = sess.run([next_element])
                except tf.errors.OutOfRangeError:
                    break

                current_batch_index += 1
                data = data[0]
                batch_x, batch_tgt, batch_fnames = data

                _, loss_result = sess.run(
                    [train_op, loss],
                    feed_dict={
                        X: batch_x,
                        y: batch_tgt,
                        lr: train_ops.lr_scheduler(epoch),
                        batch_size: BATCH_SIZE,
                        is_training: True,
                        dropout_keep_prob: 0.3,
                    })
                # print("Loss: {}".format(loss_result))
                loss_count += loss_result

            loss_count = loss_count / current_batch_index
            logger.info("Loss on epoch {} : {} - LR: {}".format(
                epoch, loss_count, train_ops.lr_scheduler(epoch)))
            acc = 0
            if do_val:
                print("Eval")
                ## Eval
                sess.run(
                    dev_init_op,
                    feed_dict={
                        # sess.run(dev_init_op, feed_dict={
                        X: dev_data[0],
                        y: dev_data[1],
                        fnames_plc: dev_data[2],
                        batch_size: BATCH_SIZE,
                    })

                current_batch_index = 0
                next_element = iter.get_next()

                while True:

                    try:
                        data = sess.run([next_element])
                    except tf.errors.OutOfRangeError:
                        break

                    current_batch_index += 1
                    data = data[0]
                    batch_x, batch_tgt, batch_fnames = data

                    results = sess.run(
                        [softmax],
                        feed_dict={
                            X: batch_x,
                            y: batch_tgt,
                            lr: train_ops.lr_scheduler(epoch),
                            batch_size: BATCH_SIZE,
                            is_training: False,
                            dropout_keep_prob: 1.0
                        })
                    results = results[0]
                    acc_aux = metrics.accuracy(X=results, y=batch_tgt)
                    acc += acc_aux

                acc = acc / current_batch_index
                print("Acc Val epoch {} : {}".format(epoch, acc))
                print("----------")
                if acc > best_acc:
                    best_acc = acc
                    logger.info("New acc : {}".format(best_acc))
                    save_best(sess, opts.work_dir)
        if opts.testing and opts.do_val:
            logger.info("Model: {}".format(MODEL))
            logger.info("layers: {}".format(layers))
            logger.info("max_features: {}".format(max_features))
            logger.info("Min and max features: {} - {}".format(min_ngram, up))
            logger.info("Best acc : {}".format(best_acc))
            exit()
        """
        ----------------- TEST -----------------
        """
        logger.info("\n-- TEST --\n")
        logger.info("Restoring the best Checkpoint.")
        # restore_file = sesions.restore_from_best(sess, save_path)
        restore_file = restore_from_best(sess, opts.work_dir)
        if restore_file:
            logger.info("Best model restored")
        else:
            logger.info("Cant restore the best model.")
            exit()
        Y_FALSA = np.random.randint(1, size=(BATCH_SIZE, n_classes))
        print(Y_FALSA.shape)
        logger.info("\n-- TEST --\n")

        sess.run(test_init_op,
                 feed_dict={
                     X: test_data[0],
                     y: Y_FALSA,
                     fnames_plc: test_data[1],
                     batch_size: BATCH_SIZE,
                 })

        current_batch_index = 0
        next_element = iter_test.get_next()
        loss_count = 0
        classifieds = []
        classifieds_to_write = []
        while True:

            try:
                data = sess.run([next_element])
            except tf.errors.OutOfRangeError:
                break

            current_batch_index += 1
            data = data[0]
            batch_x, batch_fnames = data

            results = sess.run(
                [softmax],
                feed_dict={
                    X: batch_x,
                    y: Y_FALSA,
                    batch_size: BATCH_SIZE,
                    dropout_keep_prob: 1.0,
                    lr: train_ops.lr_scheduler(1)
                })

            for i in range(len(results[0])):
                # to write
                hyp = [np.argmax(results[0][i], axis=-1)]
                hyp = labelencoder.inverse_transform(hyp)[
                    0]  #real label   #set
                doc_name = batch_fnames[i].decode("utf-8").split("/")[-1]

                classifieds_to_write.append((doc_name, lang, hyp))

        logger.info("----------")
        logger.info("Writting results in output dir {}".format("{}/{}".format(
            opts.o, lang)))

        if sent != 2 and lang == 'en':
            dt_train = process.PAN2019(path=path,
                                       txt=txt_train,
                                       join_all=MODEL == "FF",
                                       sentiment_id=2)
            x_train = dt_train.X
            y2_train = dt_train.y2
            del dt_train
            if max_features:
                rep = TfidfVectorizer(ngram_range=(min_ngram, up),
                                      max_features=max_features)
            else:
                rep = TfidfVectorizer(ngram_range=(min_ngram, up))
            texts_rep_train = rep.fit_transform(x_train).toarray()
            dt_test = process.PAN2019_Test(path=path_test,
                                           join_all=MODEL == "FF",
                                           sentiment_id=2)
            del dt_test
            x_test = dt_test.X
            text_test_rep = rep.transform(x_test).toarray()

        process.write_from_array(classifieds_to_write,
                                 "{}/{}".format(opts.o, lang), x_train,
                                 texts_rep_train, y2_train, x_test,
                                 text_test_rep, fnames_test)
def main():
    """Train and evaluate a CRNN spoken-language-identification model.

    Parses hyperparameters from the command line, builds a CNN feature
    extractor feeding a (bi)GRU classifier, trains on the TIMIT + Korean
    dataset lists while checkpointing the model with the lowest validation
    loss, then restores that checkpoint and reports test loss/accuracy and
    a confusion matrix.

    Returns:
        int: 0 on successful completion. Returns early (None) when
        ``--mode`` is not ``'train'``.
    """
    parser = argparse.ArgumentParser(
        description='Spoken Language Identification')
    parser.add_argument('--hidden_size',
                        type=int,
                        default=512,
                        help='hidden size of model (default: 512)')
    parser.add_argument('--layer_size',
                        type=int,
                        default=3,
                        help='number of layers of model (default: 3)')
    parser.add_argument('--n_class',
                        type=int,
                        default=2,
                        help='number of classes of data (default: 2)')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.2,
                        help='dropout rate in training (default: 0.2)')
    # NOTE(review): with action='store_true' AND default=True this flag can
    # never turn bidirectionality off; kept as-is for backward compatibility,
    # but consider a --no_bidirectional flag if unidirectional RNNs are needed.
    parser.add_argument('--bidirectional',
                        default=True,
                        action='store_true',
                        help='use bidirectional RNN (default: True)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=2,
                        help='batch size in training (default: 2)')
    parser.add_argument(
        '--workers',
        type=int,
        default=4,
        help='number of workers in dataset loader (default: 4)')
    parser.add_argument('--max_epochs',
                        type=int,
                        default=10,
                        help='number of max epochs in training (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-04,
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--no_cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--save_name',
                        type=str,
                        default='model',
                        help='the name of model')
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--nn_type',
                        type=str,
                        default='crnn',
                        help='type of neural networks')

    args = parser.parse_args()

    # Seed every RNG we use so runs are reproducible.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device('cuda' if args.cuda else 'cpu')

    # Number of spectrogram frequency bins; integer division keeps this an
    # int as the CNN expects a discrete feature dimension.
    feature_size = N_FFT // 2 + 1

    cnn = CNN.CNN(feature_size)
    rnn = RNN.RNN(cnn.feature_size,
                  args.hidden_size,
                  args.n_class,
                  input_dropout_p=args.dropout,
                  dropout_p=args.dropout,
                  n_layers=args.layer_size,
                  bidirectional=args.bidirectional,
                  rnn_cell='gru',
                  variable_lengths=False)

    model = CRNN.CRNN(cnn, rnn)
    model.flatten_parameters()

    model = nn.DataParallel(model).to(device)

    optimizer = optim.Adam(model.module.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss(reduction='sum').to(device)

    if args.mode != 'train':
        return

    download_data()

    kor_db_list = []
    search('dataset/train/train_data', kor_db_list)

    # np.unicode was removed in NumPy 1.24; plain `str` is the portable
    # equivalent for loadtxt.
    train_wav_paths = np.loadtxt("dataset/TRAIN_list.csv",
                                 delimiter=',',
                                 dtype=str)
    valid_wav_paths = np.loadtxt("dataset/TEST_developmentset_list.csv",
                                 delimiter=',',
                                 dtype=str)
    test_wav_paths = np.loadtxt("dataset/TEST_coreset_list.csv",
                                delimiter=',',
                                dtype=str)

    train_wav_paths = list(
        map(lambda x: "dataset/TIMIT/{}.WAV".format(x), train_wav_paths))
    valid_wav_paths = list(
        map(lambda x: "dataset/TIMIT/{}.WAV".format(x), valid_wav_paths))
    test_wav_paths = list(
        map(lambda x: "dataset/TIMIT/{}.WAV".format(x), test_wav_paths))

    min_loss = float('inf')
    begin_epoch = 0
    # Initialize so the final test log cannot hit an UnboundLocalError when
    # no epoch ever improves on min_loss (e.g. --max_epochs 0).
    save_epoch = begin_epoch

    # [train_loss, train_acc, eval_loss, eval_acc] histories per epoch.
    loss_acc = [[], [], [], []]

    train_batch_num, train_dataset_list, valid_dataset, test_dataset = \
        split_dataset(args, train_wav_paths, valid_wav_paths, test_wav_paths, kor_db_list)

    logger.info('start')

    train_begin = time.time()

    # Make sure the checkpoint directory exists before the first torch.save.
    import os
    os.makedirs('./save_model', exist_ok=True)

    for epoch in range(begin_epoch, args.max_epochs):

        train_queue = queue.Queue(args.workers * 2)

        train_loader = MultiLoader(train_dataset_list, train_queue,
                                   args.batch_size, args.workers, args.nn_type)
        train_loader.start()

        train_loss, train_acc = train(model, train_batch_num, train_queue,
                                      criterion, optimizer, device,
                                      train_begin, args.workers, 10)
        logger.info('Epoch %d (Training) Loss %0.4f Acc %0.4f' %
                    (epoch, train_loss, train_acc))

        train_loader.join()

        loss_acc[0].append(train_loss)
        loss_acc[1].append(train_acc)

        valid_queue = queue.Queue(args.workers * 2)

        valid_loader = BaseDataLoader(valid_dataset, valid_queue,
                                      args.batch_size, 0, args.nn_type)
        valid_loader.start()

        eval_loss, eval_acc = evaluate(model, valid_loader, valid_queue,
                                       criterion, device)
        logger.info('Epoch %d (Evaluate) Loss %0.4f Acc %0.4f' %
                    (epoch, eval_loss, eval_acc))

        valid_loader.join()

        loss_acc[2].append(eval_loss)
        loss_acc[3].append(eval_acc)

        # Keep only the checkpoint with the best (lowest) validation loss.
        best_model = (eval_loss < min_loss)

        if best_model:
            min_loss = eval_loss
            torch.save(model.state_dict(), './save_model/best_model.pt')
            save_epoch = epoch

    model.load_state_dict(torch.load('./save_model/best_model.pt'))

    test_queue = queue.Queue(args.workers * 2)

    test_loader = BaseDataLoader(test_dataset, test_queue, args.batch_size, 0,
                                 args.nn_type)
    test_loader.start()

    confusion_matrix = torch.zeros((args.n_class, args.n_class))
    test_loss, test_acc = evaluate(model, test_loader, test_queue, criterion,
                                   device, confusion_matrix)
    logger.info('Epoch %d (Test) Loss %0.4f Acc %0.4f' %
                (save_epoch, test_loss, test_acc))

    test_loader.join()

    save_data(loss_acc, test_loss, test_acc,
              confusion_matrix.to('cpu').numpy())
    plot_data(loss_acc, test_loss, test_acc)

    return 0
###############################################################################
#
# MODEL SETUP
#
###############################################################################

# NOTE ==============================================
# This is where your model code will be called. You may modify this code
# if required for your implementation, but it should not typically be necessary,
# and you must let the TAs know if you do so.
if args.model == 'RNN':
    model = RNN(emb_size=args.emb_size,
                hidden_size=args.hidden_size,
                seq_len=args.seq_len,
                batch_size=args.batch_size,
                vocab_size=vocab_size,
                num_layers=args.num_layers,
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'GRU':
    model = GRU(emb_size=args.emb_size,
                hidden_size=args.hidden_size,
                seq_len=args.seq_len,
                batch_size=args.batch_size,
                vocab_size=vocab_size,
                num_layers=args.num_layers,
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'TRANSFORMER':
    if args.debug:  # use a very small model
        model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2)
    else:
    def __init__(self,
                 domain,
                 train_data_file,
                 validation_data_file,
                 test_data_file,
                 minibatch_size,
                 rng,
                 device,
                 behav_policy_file_wDemo,
                 behav_policy_file,
                 context_input=False,
                 context_dim=0,
                 drop_smaller_than_minibatch=True,
                 folder_name='/Name',
                 autoencoder_saving_period=20,
                 resume=False,
                 sided_Q='negative',
                 autoencoder_num_epochs=50,
                 autoencoder_lr=0.001,
                 autoencoder='AIS',
                 hidden_size=16,
                 ais_gen_model=1,
                 ais_pred_model=1,
                 embedding_dim=4,
                 state_dim=42,
                 num_actions=25,
                 corr_coeff_param=10,
                 dst_hypers={},  # NOTE(review): mutable default argument — safe only if never mutated
                 cde_hypers={},  # NOTE(review): mutable default argument
                 odernn_hypers={},  # NOTE(review): mutable default argument
                 **kwargs):
        '''
        Set up an autoencoder-based representation-learning experiment for
        offline RL on the sepsis domain.

        We assume discrete actions and scalar rewards!

        Builds the encoder/decoder pair selected by ``autoencoder`` (one of
        'AIS', 'AE', 'DST', 'DDM', 'RNN', 'CDE', 'ODERNN'), creates the
        on-disk folder layout for checkpoints and generated data under
        ``folder_name``, and loads the train/validation/test tensors into
        DataLoaders. For 'CDE', interpolation coefficients are additionally
        pre-computed/loaded up front to save time later.

        Parameters (selected):
          domain -- must be 'sepsis' (asserted below).
          train_data_file / validation_data_file / test_data_file -- paths to
              torch-saved tuples of (demographics, states, interventions,
              lengths, times, acuities, rewards) tensors.
          minibatch_size -- batch size for all three DataLoaders.
          context_input / context_dim -- whether (and how wide) categorical
              context is appended to the encoder input.
          behav_policy_file_wDemo / behav_policy_file -- stored on the
              instance here; not read in this constructor.
          dst_hypers / cde_hypers / odernn_hypers -- architecture-specific
              hyperparameters, only read for the matching ``autoencoder``.
          **kwargs -- absorbed and ignored (extra config keys).

        Note: ``embedding_dim``, ``ais_gen_model`` and ``ais_pred_model`` are
        only used where the corresponding branches reference them;
        ``embedding_dim`` is not read in this constructor.

        Raises NotImplementedError if ``autoencoder`` is not a supported name.
        '''

        # ---- stash configuration on the instance ----
        self.rng = rng
        self.device = device
        self.train_data_file = train_data_file
        self.validation_data_file = validation_data_file
        self.test_data_file = test_data_file
        self.minibatch_size = minibatch_size
        self.drop_smaller_than_minibatch = drop_smaller_than_minibatch
        self.autoencoder_num_epochs = autoencoder_num_epochs
        self.autoencoder = autoencoder
        self.autoencoder_lr = autoencoder_lr
        self.saving_period = autoencoder_saving_period
        self.resume = resume
        self.sided_Q = sided_Q
        self.num_actions = num_actions
        self.state_dim = state_dim
        self.corr_coeff_param = corr_coeff_param

        self.context_input = context_input  # Check to see if we'll one-hot encode the categorical contextual input
        self.context_dim = context_dim  # Check to see if we'll remove the context from the input and only use it for decoding
        self.hidden_size = hidden_size

        # Encoder input = state (+ optional context) concatenated with a
        # one-hot action vector.
        if self.context_input:
            self.input_dim = self.state_dim + self.context_dim + self.num_actions
        else:
            self.input_dim = self.state_dim + self.num_actions

        # ---- on-disk layout: <folder_name>/<model>_data and _checkpoints ----
        # NOTE(review): os.mkdir assumes folder_name itself already exists;
        # it will raise FileNotFoundError otherwise — confirm callers create it.
        self.autoencoder_lower = self.autoencoder.lower()
        self.data_folder = folder_name + f'/{self.autoencoder_lower}_data'
        self.checkpoint_file = folder_name + f'/{self.autoencoder_lower}_checkpoints/checkpoint.pt'
        if not os.path.exists(folder_name +
                              f'/{self.autoencoder_lower}_checkpoints'):
            os.mkdir(folder_name + f'/{self.autoencoder_lower}_checkpoints')
        if not os.path.exists(folder_name + f'/{self.autoencoder_lower}_data'):
            os.mkdir(folder_name + f'/{self.autoencoder_lower}_data')
        self.store_path = folder_name
        self.gen_file = folder_name + f'/{self.autoencoder_lower}_data/{self.autoencoder_lower}_gen.pt'
        self.pred_file = folder_name + f'/{self.autoencoder_lower}_data/{self.autoencoder_lower}_pred.pt'

        # ---- instantiate the selected encoder (self.gen) / decoder (self.pred) ----
        if self.autoencoder == 'AIS':
            # Approximate Information State encoder/decoder.
            self.container = AIS.ModelContainer(device, ais_gen_model,
                                                ais_pred_model)
            self.gen = self.container.make_encoder(
                self.hidden_size,
                self.state_dim,
                self.num_actions,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.hidden_size,
                                                    self.state_dim,
                                                    self.num_actions)

        elif self.autoencoder == 'AE':
            # Plain autoencoder.
            self.container = AE.ModelContainer(device)
            self.gen = self.container.make_encoder(
                self.hidden_size,
                self.state_dim,
                self.num_actions,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.hidden_size,
                                                    self.state_dim,
                                                    self.num_actions)

        elif self.autoencoder == 'DST':
            # Deep state-space model variant configured via dst_hypers.
            self.dst_hypers = dst_hypers
            self.container = DST.ModelContainer(device)
            self.gen = self.container.make_encoder(
                self.input_dim,
                self.hidden_size,
                gru_n_layers=self.dst_hypers['gru_n_layers'],
                augment_chs=self.dst_hypers['augment_chs'])
            self.pred = self.container.make_decoder(
                self.hidden_size, self.state_dim,
                self.dst_hypers['decoder_hidden_units'])

        elif self.autoencoder == 'DDM':
            # Dynamics model with an extra action-dynamics network and joint
            # optimization over encoder + decoder + dynamics parameters.
            self.container = DDM.ModelContainer(device)

            self.gen = self.container.make_encoder(
                self.state_dim,
                self.hidden_size,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.state_dim,
                                                    self.hidden_size)
            self.dyn = self.container.make_dyn(self.num_actions,
                                               self.hidden_size)
            self.all_params = chain(self.gen.parameters(),
                                    self.pred.parameters(),
                                    self.dyn.parameters())

            # DDM-specific loss weights and gradient clipping bound.
            self.inv_loss_coef = 10
            self.dec_loss_coef = 0.1
            self.max_grad_norm = 50

            self.dyn_file = folder_name + '/ddm_data/ddm_dyn.pt'

        elif self.autoencoder == 'RNN':
            # Recurrent encoder/decoder baseline.
            self.container = RNN.ModelContainer(device)

            self.gen = self.container.make_encoder(
                self.hidden_size,
                self.state_dim,
                self.num_actions,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.hidden_size,
                                                    self.state_dim,
                                                    self.num_actions)

        elif self.autoencoder == 'CDE':
            # Neural controlled differential equation; +1 input channel is
            # presumably the time channel CDEs require — TODO confirm.
            self.cde_hypers = cde_hypers

            self.container = CDE.ModelContainer(device)
            self.gen = self.container.make_encoder(
                self.input_dim + 1,
                self.hidden_size,
                hidden_hidden_channels=self.
                cde_hypers['encoder_hidden_hidden_channels'],
                num_hidden_layers=self.cde_hypers['encoder_num_hidden_layers'])
            self.pred = self.container.make_decoder(
                self.hidden_size, self.state_dim,
                self.cde_hypers['decoder_num_layers'],
                self.cde_hypers['decoder_num_units'])

        elif self.autoencoder == 'ODERNN':
            # ODE-RNN encoder with an MLP decoder sized by odernn_hypers.
            self.odernn_hypers = odernn_hypers
            self.container = ODERNN.ModelContainer(device)

            self.gen = self.container.make_encoder(self.input_dim,
                                                   self.hidden_size,
                                                   self.odernn_hypers)
            self.pred = self.container.make_decoder(
                self.hidden_size, self.state_dim,
                self.odernn_hypers['decoder_n_layers'],
                self.odernn_hypers['decoder_n_units'])
        else:
            raise NotImplementedError

        # ---- output file paths for downstream evaluation artifacts ----
        self.buffer_save_file = self.data_folder + '/ReplayBuffer'
        self.next_obs_pred_errors_file = self.data_folder + '/test_next_obs_pred_errors.pt'
        self.test_representations_file = self.data_folder + '/test_representations.pt'
        self.test_correlations_file = self.data_folder + '/test_correlations.pt'
        self.policy_eval_save_file = self.data_folder + '/dBCQ_policy_eval'
        self.policy_save_file = self.data_folder + '/dBCQ_policy'
        self.behav_policy_file_wDemo = behav_policy_file_wDemo
        self.behav_policy_file = behav_policy_file

        # ---- load the three torch-saved dataset splits into DataLoaders ----
        # Read in the data csv files
        assert (domain == 'sepsis')
        self.train_demog, self.train_states, self.train_interventions, self.train_lengths, self.train_times, self.acuities, self.rewards = torch.load(
            self.train_data_file)
        # Each sample carries its original index so batches can be mapped
        # back to trajectories.
        train_idx = torch.arange(self.train_demog.shape[0])
        self.train_dataset = TensorDataset(self.train_demog, self.train_states,
                                           self.train_interventions,
                                           self.train_lengths,
                                           self.train_times, self.acuities,
                                           self.rewards, train_idx)

        # Only the training loader shuffles; val/test keep dataset order.
        self.train_loader = DataLoader(self.train_dataset,
                                       batch_size=self.minibatch_size,
                                       shuffle=True)

        self.val_demog, self.val_states, self.val_interventions, self.val_lengths, self.val_times, self.val_acuities, self.val_rewards = torch.load(
            self.validation_data_file)
        val_idx = torch.arange(self.val_demog.shape[0])
        self.val_dataset = TensorDataset(self.val_demog, self.val_states,
                                         self.val_interventions,
                                         self.val_lengths, self.val_times,
                                         self.val_acuities, self.val_rewards,
                                         val_idx)

        self.val_loader = DataLoader(self.val_dataset,
                                     batch_size=self.minibatch_size,
                                     shuffle=False)

        self.test_demog, self.test_states, self.test_interventions, self.test_lengths, self.test_times, self.test_acuities, self.test_rewards = torch.load(
            self.test_data_file)
        test_idx = torch.arange(self.test_demog.shape[0])
        self.test_dataset = TensorDataset(self.test_demog, self.test_states,
                                          self.test_interventions,
                                          self.test_lengths, self.test_times,
                                          self.test_acuities,
                                          self.test_rewards, test_idx)

        self.test_loader = DataLoader(self.test_dataset,
                                      batch_size=self.minibatch_size,
                                      shuffle=False)

        # encode CDE data first to save time
        if self.autoencoder == 'CDE':
            self.train_coefs = load_cde_data('train', self.train_dataset,
                                             self.cde_hypers['coefs_folder'],
                                             self.context_input, device)
            self.val_coefs = load_cde_data('val', self.val_dataset,
                                           self.cde_hypers['coefs_folder'],
                                           self.context_input, device)
            self.test_coefs = load_cde_data('test', self.test_dataset,
                                            self.cde_hypers['coefs_folder'],
                                            self.context_input, device)
        print("\nPutting log in %s" % experiment_path)
        args['save_dir'] = experiment_path
        with open(os.path.join(experiment_path, 'exp_config.txt'), 'w') as f:
            for key in sorted(args):
                f.write(key + '    ' + str(args[key]) + '\n')

        ###############################################################################
        #
        # MODEL SETUP
        #
        ###############################################################################
        if args["model"] == 'RNN':
            model = RNN(emb_size=args["emb_size"],
                        hidden_size=args["hidden_size"],
                        seq_len=args["seq_len"],
                        batch_size=args["batch_size"],
                        vocab_size=vocab_size,
                        num_layers=args["num_layers"],
                        dp_keep_prob=args["dp_keep_prob"])
        elif args["model"] == 'GRU':
            model = GRU(emb_size=args["emb_size"],
                        hidden_size=args["hidden_size"],
                        seq_len=args["seq_len"],
                        batch_size=args["batch_size"],
                        vocab_size=vocab_size,
                        num_layers=args["num_layers"],
                        dp_keep_prob=args["dp_keep_prob"])
        elif args["model"] == 'TRANSFORMER':
            if args["debug"]:  # use a very small model
                model = TRANSFORMER(vocab_size=vocab_size,
                                    n_units=16,
Exemple #20
0
def main(args):
    """Run caption inference over a directory of images.

    Loads a CNN encoder / RNN decoder pair from a checkpoint, generates a
    caption for every image found under ``args.image_path``, prints each
    batch of samples, and always dumps the accumulated results (a list of
    ``{'image_id': int, 'caption': str}`` dicts) to ``captions_model.json``
    — even on Ctrl-C.

    Args:
        args: parsed CLI namespace providing ``batch_size``, ``image_path``,
            ``embed_size``, ``num_hidden``, ``rec_unit`` and
            ``checkpoint_file``.
    """
    import json  # hoisted from the `finally` block so the dump path is obvious

    # hyperparameters
    batch_size = args.batch_size
    num_workers = 2

    # Image preprocessing — ImageNet mean/std to match the pretrained encoder.
    # NOTE(review): RandomHorizontalFlip is a train-time augmentation and is
    # unusual at inference time; kept for behavioral compatibility — confirm.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    vocab = load_vocab()

    loader = get_basic_loader(dir_path=os.path.join(args.image_path),
                              transform=transform,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers)

    # Build the models (trailing 1 = single decoder layer).
    encoder = CNN(args.embed_size)
    decoder = RNN(args.embed_size,
                  args.num_hidden,
                  len(vocab),
                  1,
                  rec_unit=args.rec_unit)

    # Restore trained weights; the optimizer state and any trailing metadata
    # in the checkpoint are not needed for inference.
    encoder_state_dict, decoder_state_dict, _optimizer, * \
        _meta = utils.load_models(args.checkpoint_file)
    encoder.load_state_dict(encoder_state_dict)
    decoder.load_state_dict(decoder_state_dict)

    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    # Generate captions (inference only — no gradients needed).
    # `results` is initialized BEFORE the try so the `finally` dump can never
    # hit an unbound name, whatever interrupts the loop.
    results = []
    try:
        with torch.no_grad():
            for step, (images, image_ids) in enumerate(loader):
                # NOTE(review): `volatile` is a legacy (pre-0.4) Variable flag;
                # kept because utils.to_var's signature is not visible here.
                images = utils.to_var(images, volatile=True)

                features = encoder(images)
                captions = decoder.sample(features)
                captions = captions.cpu().data.numpy()
                captions = [
                    utils.convert_back_to_text(cap, vocab) for cap in captions
                ]
                captions_formatted = [{
                    'image_id': int(img_id),
                    'caption': cap
                } for img_id, cap in zip(image_ids, captions)]
                results.extend(captions_formatted)
                print('Sample:', captions_formatted)
    except KeyboardInterrupt:
        print('Ok bye!')
    finally:
        # Always persist whatever was captioned so partial runs aren't lost.
        file_name = 'captions_model.json'
        with open(file_name, 'w') as f:
            json.dump(results, f)
raw_data = ptb_raw_data(data_path=args.data)
train_data, valid_data, test_data, word_to_id, id_2_word = raw_data
vocab_size = len(word_to_id)
print('  vocabulary size: {}'.format(vocab_size))

###############################################################################
#
# MODEL SETUP
#
###############################################################################

if args.model == 'RNN':
    model = RNN(emb_size=args.emb_size,
                hidden_size=args.hidden_size,
                seq_len=args.seq_len,
                batch_size=args.batch_size,
                vocab_size=vocab_size,
                num_layers=args.num_layers,
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'GRU':
    model = GRU(emb_size=args.emb_size,
                hidden_size=args.hidden_size,
                seq_len=args.seq_len,
                batch_size=args.batch_size,
                vocab_size=vocab_size,
                num_layers=args.num_layers,
                dp_keep_prob=args.dp_keep_prob)
elif args.model == 'TRANSFORMER':
    if args.debug:  # use a very small model
        model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2)
    else:
                            vocab_file=args.vocab_file)

# set up batching
loader = DataLoader(dataset,
                    batch_size=args.batch_size,
                    shuffle=True,
                    drop_last=True,
                    collate_fn=SmilesCollate(dataset.vocabulary))

# set up model
if args.embedding_size > 0:
    model = RNN(vocabulary=dataset.vocabulary,
                rnn_type=args.rnn_type,
                embedding_size=args.embedding_size,
                hidden_size=args.hidden_size,
                n_layers=args.n_layers,
                dropout=args.dropout,
                bidirectional=args.bidirectional,
                tie_weights=args.tie_weights,
                nonlinearity=args.nonlinearity)
else:
    # no embedding layer (one-hot encoding)
    model = OneHotRNN(vocabulary=dataset.vocabulary,
                      rnn_type=args.rnn_type,
                      hidden_size=args.hidden_size,
                      n_layers=args.n_layers,
                      dropout=args.dropout,
                      bidirectional=args.bidirectional,
                      nonlinearity=args.nonlinearity)

# optionally, load model parameters from file