Example #1
 def get_predictions(self, frames, scope):
     frames = self._reshape_to_conv(frames)
     cnn = CNN()
     if self.operation == 'training':
         cnn_output = cnn.create_model(frames,
                                       cnn.conv_filters,
                                       keep_prob=self.keep_prob)
     else:
         cnn_output = cnn.create_model(frames,
                                       cnn.conv_filters,
                                       keep_prob=1.0)
     cnn_output = self._reshape_to_rnn(cnn_output)
     rnn = RNN()
     rnn_output = rnn.create_model(cnn_output, scope + '_rnn')
     if self.is_attention:
         attention = Attention(self.batch_size)
         attention_output = attention.create_model(rnn_output,
                                                   scope + '_attention')
         fc = FC(self.num_classes)
         outputs = fc.create_model(attention_output, scope + '_fc')
     else:
         rnn_output = rnn_output[:, -1, :]
         fc = FC(self.num_classes)
         outputs = fc.create_model(rnn_output, scope + '_fc')
     return outputs
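In the non-attention branch above, only the last RNN timestep is kept before the fully connected layer. A standalone illustration of that slicing, with hypothetical shapes:

import numpy as np

rnn_output = np.zeros((4, 10, 32))  # (batch, time, features) -- hypothetical shape
last_step = rnn_output[:, -1, :]    # shape (4, 32): the final timestep of each sequence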
Example #2
def test_build_rnn(dim_in=31, dim_h=11, dim_out=None, i_net=None, o_net=None):
    print('Building RNN')

    if i_net is None:
        i_net = dict(
            dim_h=17,
            n_layers=2,
            h_act='T.tanh',
            distribution='centered_binomial',
            weight_scale=0.1,
        )
    if o_net is None:
        o_net = dict(dim_h=23,
                     n_layers=2,
                     h_act='T.tanh',
                     weight_scale=0.1,
                     distribution='continuous_binomial')

    nets = dict(i_net=i_net, o_net=o_net)

    trng = RandomStreams(101)
    mlps = RNN.mlp_factory(dim_in, dim_h, dim_out=dim_out, **nets)
    rnn = RNN(dim_in, dim_h, dim_out=dim_out, trng=trng, **mlps)
    rnn.set_tparams()
    print('RNN formed correctly')

    return rnn
Example #3
 def test_vanilla_rnn(self):
     rnn = RNN(8000, 100, 8000)
     input_data = np.arange(8000)
     t0 = time.time()
     assert rnn.forward_propagation(input_data)
     tt = time.time() - t0
     print("\nRNN forward propagation %s sec\n" % str(tt))
Example #4
def main(args):
    glove_dir = args.glove_dir

    with open(glove_dir + '/config.json') as jsonData:
        rawData = json.load(jsonData)

    # merge the two configurations
    config = dict(rawData['config'])
    config.update(vars(args))
    config['word_to_id_dict'] = rawData['word_to_id_dict']
    config['glove_dir'] = glove_dir

    if args.debug:
        print('Loading corpus as sets')
    textfile = args.textfile
    train_data, dev_data, test_data = load_corpus_as_sets(
        textfile, rawData['word_to_id_dict'])

    if args.debug:
        print('Building graph')
    model = RNN(config)

    if args.debug:
        print('Training model')
    model.fit(train_data, dev_data)

    return model.log_dir
Example #5
def run(model_id, state_id, data_id, lr):
    DATA_PATH = path[data_id]
    result_dir = (EXP_DIR + models[model_id] + "/" + states[state_id] + "/" +
                  data[data_id].__name__)

    if not os.path.isdir(result_dir):
        os.makedirs(result_dir)

    result_log = result_dir + '/log.csv'
    if os.path.isfile(result_log):
        return

    FOLD_NUM = fold_num[data_id]
    acc = []

    conf = Config()
    conf.lr = lr
    for fold in range(FOLD_NUM):
        conf.ckp_file = result_dir + '/fold_' + str(fold + 1) + '.ckpt'
        dataset = data[data_id](DATA_PATH, fold + 1)
        dataset.set_vector_type(int(states[state_id][-1]))
        rnn = RNN(conf, dataset, model_type=models[model_id])
        vld_, pred, labs = rnn.run()
        # Save fold result
        save_result(pred, labs, result_dir + '/fold' + str(fold))

        acc.append(vld_)

        print(vld_)
        #return
    acc = np.array(acc)
    acc = np.append(acc, [np.mean(acc, axis=0)], axis=0)
    # Save to CSV file
    np.savetxt(result_log, acc, delimiter=',')
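The last lines above append the column-wise mean of the fold results as a final row before writing the CSV. A standalone illustration with hypothetical values:

import numpy as np

acc = np.array([[0.80, 0.75],
                [0.90, 0.85]])                        # one row per fold (hypothetical)
acc = np.append(acc, [np.mean(acc, axis=0)], axis=0)  # appends the mean row [0.85, 0.80]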
Example #6
 def __init__(self, nx, ny, nz, horizon, specs):
     super(VAE, self).__init__()
     self.nx = nx
     self.ny = ny
     self.nz = nz
     self.horizon = horizon
     self.rnn_type = rnn_type = specs.get('rnn_type', 'lstm')
     self.x_birnn = x_birnn = specs.get('x_birnn', True)
     self.e_birnn = e_birnn = specs.get('e_birnn', True)
     self.use_drnn_mlp = specs.get('use_drnn_mlp', False)
     self.nh_rnn = nh_rnn = specs.get('nh_rnn', 128)
     self.nh_mlp = nh_mlp = specs.get('nh_mlp', [300, 200])
     # encode
     self.x_rnn = RNN(nx, nh_rnn, bi_dir=x_birnn, cell_type=rnn_type)
     self.e_rnn = RNN(ny, nh_rnn, bi_dir=e_birnn, cell_type=rnn_type)
     self.e_mlp = MLP(2 * nh_rnn, nh_mlp)
     self.e_mu = nn.Linear(self.e_mlp.out_dim, nz)
     self.e_logvar = nn.Linear(self.e_mlp.out_dim, nz)
     # decode
     if self.use_drnn_mlp:
         self.drnn_mlp = MLP(nh_rnn, nh_mlp + [nh_rnn], activation='tanh')
     self.d_rnn = RNN(ny + nz + nh_rnn, nh_rnn, cell_type=rnn_type)
     self.d_mlp = MLP(nh_rnn, nh_mlp)
     self.d_out = nn.Linear(self.d_mlp.out_dim, ny)
     self.d_rnn.set_mode('step')
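The e_mu and e_logvar heads are the usual VAE encoder outputs; the constructor does not show how they are consumed, but a typical reparameterization step (a sketch, not part of the example) looks like this:

import torch

def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling differentiable
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std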
Example #7
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 50, 50
    load_data = LoadHAR(add_pitch=add_pitch,
                        add_roll=add_roll,
                        add_filter=add_filter,
                        n_samples=n_samples,
                        step=step)

    conf = ModelConfiguration()
    conf.load_datasets(
        [load_data.uci_mhealth, load_data.idash, load_data.wisdm1])

    user_idx = -1
    user = None
    # Create a time-string for our cv run
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )

    for train_index, test_index in conf.cv:
        conf.user = user

        model = RNN(n_in=(n_samples, conf.n_features),
                    n_hidden=[50, 50],
                    dropout_probability=0.5,
                    n_out=conf.n_classes,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax)

        if len(conf.cv) > 1:
            user_idx += 1
            conf.user = conf.user_names[user_idx]

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.002,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data)
Example #8
def refine(dataloader: DataLoader, model: RNN, optimizer: torch.optim.Optimizer,
           loss_function: Union[SplitCrossEntropyLoss, CrossEntropyLoss], prior: Prior, bptt: int,
           use_apex: bool = False,
           amp=None, alpha: float = 0, beta: float = 0, importance: Union[int, float] = 100000,
           device: Union[torch.device, str] = 'cpu', **kwargs):
    model.train()
    batch = 0
    with tqdm(dataloader, total=len(dataloader)) as pbar:
        for data, targets, seq_len, lang in pbar:
            data = data.squeeze(0).to(device)
            targets = targets.squeeze(0).to(device)
            lang = lang.to(device)

            lr2 = optimizer.param_groups[0]['lr']
            optimizer.param_groups[0]['lr'] = lr2 * seq_len.item() / bptt
            hidden = model.init_hidden(batchsize=data.size(-1))
            optimizer.zero_grad()

            output, hidden, rnn_hs, dropped_rnn_hs = model(data, hidden, lang, return_h=True)
            if isinstance(loss_function, SplitCrossEntropyLoss):
                loss = loss_function(model.decoder.weight, model.decoder.bias, output, targets)
            else:
                loss = loss_function(output, targets)

            penalty = importance * prior.penalty(model)
            loss += penalty

            # Activation Regularization
            if alpha:
                loss = loss + sum(alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness)
            if beta:
                loss = loss + sum(beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])

            if use_apex:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            optimizer.step()

            optimizer.param_groups[0]['lr'] = lr2

            batch += 1

            pbar.set_description(
                'Loss {:5.2f} | bpc {:9.3f} | penalty {} |'.format(loss, loss / math.log(2), penalty.item()))
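refine temporarily rescales the learning rate in proportion to the sampled sequence length, so shorter BPTT windows produce proportionally smaller updates, and restores it after the step. A minimal sketch of that pattern in isolation, with hypothetical values:

import torch

params = [torch.nn.Parameter(torch.zeros(3))]
optimizer = torch.optim.SGD(params, lr=0.1)

bptt, seq_len = 70, 35                                      # hypothetical window sizes
base_lr = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = base_lr * seq_len / bptt  # scale for this batch
# ... forward / backward / optimizer.step() ...
optimizer.param_groups[0]['lr'] = base_lr                   # restore for the next batch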
Example #9
 def multi_get_attention(self, frames):
     frames = self._reshape_to_conv(frames)
     cnn = CNN()
     cnn_output = cnn.create_model(frames, cnn.conv_filters)
     cnn_output = self._reshape_to_rnn(cnn_output)
     rnn = RNN()
     rnn_output = rnn.create_model(cnn_output)
     if self.is_attention:
         attention = Attention(self.batch_size)
         attention_output = attention.attention_analysis(rnn_output)
         return attention_output
     else:
         rnn_output = rnn_output[:, -1, :]
         fc = FC(self.num_classes)
         outputs = fc.create_model(rnn_output)
         return outputs
Example #10
    def __init__(self, config):
        super(MultiModalCore, self).__init__()
        self.config = config
        self.v_dim = config.v_dim
        self.q_emb_dim = config.q_emb_dim
        self.mmc_sizes = config.mmc_sizes
        self.mmc_layers = []
        self.input_dropout = nn.Dropout(p=config.input_dropout)

        # Create MLP with early fusion in the first layer followed by batch norm
        for mmc_ix in range(len(config.mmc_sizes)):
            if mmc_ix == 0:
                # @author : Bhanuka
                if config.additive_fusion or config.multiplicative_fusion:
                    in_s = self.v_dim
                elif config.concat_fusion:
                    in_s = self.v_dim + self.q_emb_dim
                elif config.question_fusion:
                    in_s = self.v_dim + (self.q_emb_dim * 2)
                self.batch_norm_fusion = nn.BatchNorm1d(in_s)
            else:
                in_s = config.mmc_sizes[mmc_ix - 1]
            out_s = config.mmc_sizes[mmc_ix]
            lin = LinearWithDropout(in_s, out_s, dropout_p=config.mmc_dropout)
            self.mmc_layers.append(lin)
            nonlin = getattr(nonlinearity, config.mmc_nonlinearity)()
            self.mmc_layers.append(nonlin)

        self.mmc_layers = nn.ModuleList(self.mmc_layers)
        self.batch_norm_mmc = nn.BatchNorm1d(self.mmc_sizes[-1])

        # Aggregation
        # @author : Bhanuka
        if not self.config.disable_late_fusion:
            if not self.config.disable_batch_norm_for_late_fusion:
                if config.concat_fusion:
                    out_s += config.q_emb_dim
                elif config.question_fusion:
                    out_s += 2 * config.q_emb_dim
                self.batch_norm_before_aggregation = nn.BatchNorm1d(out_s)

        # Transformer
        # @author : Bhanuka
        if config.transformer_aggregation:
            self.aggregator = transformer.TransformerModel(
                config.ta_ntoken, config.ta_ninp, config.ta_nheads,
                config.ta_nhid, config.ta_nencoders, config.ta_dropout)
            for p in self.aggregator.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)

        else:
            self.aggregator = RNN(out_s,
                                  config.mmc_aggregator_dim,
                                  nlayers=config.mmc_aggregator_layers,
                                  bidirect=True)
Example #11
def evaluate(dataloader: DataLoader, model: RNN, loss_function: Union[SplitCrossEntropyLoss, CrossEntropyLoss],
             only_l: Union[torch.Tensor, int] = None, device: Union[torch.device, str] = 'cpu', **kwargs):
    model.eval()

    languages = dataloader.dataset.data.keys()
    if only_l:
        if only_l not in languages:
            raise ValueError(f'Language {only_l} does not exist in the dataset')
        local_losses = {only_l: 0}
    else:
        local_losses = {lang: 0 for lang in languages}

    batch = 0
    prev_lang = ""

    with tqdm(dataloader, total=len(dataloader)) as pbar:
        for data, targets, seq_len, lang in pbar:
            data = data.squeeze(0).to(device)
            targets = targets.squeeze(0).to(device)
            lang = lang.to(device)

            if only_l and only_l != lang:
                continue

            if prev_lang != lang:
                prev_lang = lang
                hidden = model.init_hidden(batchsize=data.size(-1))
            else:
                detach(hidden)

            with torch.no_grad():
                output, hidden = model(data, hidden, lang)
                if isinstance(loss_function, SplitCrossEntropyLoss):
                    loss = loss_function(model.decoder.weight, model.decoder.bias, output, targets)
                else:
                    loss = loss_function(output, targets)
                local_losses[lang.item()] += len(data) * loss.data

            batch += 1

            pbar.set_description('Evaluation, finished batch {} | loss {}'.format(batch, loss.data))

    avg_loss = {lang: local_losses[lang].item() / len(dataloader.dataset.data[lang]) for lang in languages} if only_l is None else {only_l: local_losses[only_l].item() / len(dataloader.dataset.data[only_l])}
    total_loss = sum(avg_loss.values())

    return total_loss / len(languages), avg_loss
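evaluate calls a detach helper to cut the hidden state's autograd history between consecutive batches of the same language; the helper itself is not shown. A minimal sketch, assuming hidden is a tensor or a nested tuple/list of tensors (as LSTM layers return), detaching in place to match the discarded return value above:

import torch

def detach(hidden):
    # Detach in place so gradients do not flow across batch boundaries
    if isinstance(hidden, torch.Tensor):
        hidden.detach_()
    else:
        for h in hidden:
            detach(h)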
Example #12
def models(m):
    if m == 'rnn':
        return RNN(1, opt.hidden_size, opt.num_layers, 1, opt.cuda)
    elif m == 'lstm':
        return LSTM(1, opt.hidden_size, opt.num_layers, 1, opt.cuda)
    elif m == 'qrnn':
        return QRNN(1, opt.hidden_size, opt.num_layers, 1, opt.cuda)
    elif m == 'cnn':
        return CNN(1, opt.hidden_size, 1, opt.cuda)
Example #13
    def get_multi_predictions(self, frames):
        frames = self._reshape_to_conv(frames)
        cnn = CNN()
        if self.operation == 'training':
            cnn_output = cnn.create_model(frames,
                                          cnn.conv_filters,
                                          keep_prob=self.keep_prob)
        else:
            cnn_output = cnn.create_model(frames,
                                          cnn.conv_filters,
                                          keep_prob=1.0)
        cnn_output = self._reshape_to_rnn(cnn_output)
        rnn = RNN()
        arousal_rnn_output = rnn.create_model(cnn_output, 'arousal_rnn')
        valence_rnn_output = rnn.create_model(cnn_output, 'valence_rnn')
        dominance_rnn_output = rnn.create_model(cnn_output, 'dominance_rnn')
        if self.is_attention:
            attention = Attention(self.batch_size)
            arousal_attention_output = attention.create_model(
                arousal_rnn_output, 'arousal_attention')
            valence_attention_output = attention.create_model(
                valence_rnn_output, 'valence_attention')
            dominance_attention_output = attention.create_model(
                dominance_rnn_output, 'dominance_attention')
            fc = FC(self.num_classes)
            arousal_fc_outputs = fc.create_model(arousal_attention_output,
                                                 'arousal_fc')
            valence_fc_outputs = fc.create_model(valence_attention_output,
                                                 'valence_fc')
            dominance_fc_outputs = fc.create_model(dominance_attention_output,
                                                   'dominance_fc')
        else:
            arousal_rnn_output = arousal_rnn_output[:, -1, :]
            valence_rnn_output = valence_rnn_output[:, -1, :]
            dominance_rnn_output = dominance_rnn_output[:, -1, :]
            fc = FC(self.num_classes)
            arousal_fc_outputs = fc.create_model(arousal_rnn_output,
                                                 'arousal_fc')
            valence_fc_outputs = fc.create_model(valence_rnn_output,
                                                 'valence_fc')
            dominance_fc_outputs = fc.create_model(dominance_rnn_output,
                                                   'dominance_fc')

        return arousal_fc_outputs, valence_fc_outputs, dominance_fc_outputs
Example #14
    def __init__(self,
                 out_dim,
                 v_hdim,
                 cnn_fdim,
                 no_cnn=False,
                 frame_shape=(3, 64, 64),
                 mlp_dim=(300, 200),
                 cnn_type='resnet',
                 v_net_type='lstm',
                 v_net_param=None,
                 cnn_rs=True,
                 causal=False,
                 device=None):
        super().__init__()
        self.out_dim = out_dim
        self.cnn_fdim = cnn_fdim
        self.v_hdim = v_hdim
        self.no_cnn = no_cnn
        self.cnn_type = cnn_type
        self.frame_shape = frame_shape
        self.device = device

        if no_cnn:
            self.cnn = None
        else:
            self.frame_shape = (1, 32, 32, 64)
            """ only for ResNet based models """
            if v_net_param is None:
                v_net_param = {}
            spec = v_net_param.get('spec', 'resnet18')
            self.cnn = P2PsfNet(cnn_fdim,
                                device=self.device,
                                running_stats=cnn_rs,
                                spec=spec)
        self.v_net_type = v_net_type

        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_fdim, v_hdim, v_net_type, bi_dir=not causal)
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim
            self.v_net = TemporalConvNet(cnn_fdim,
                                         tcn_size,
                                         kernel_size=kernel_size,
                                         dropout=dropout,
                                         causal=causal)
        if self.v_net_type == 'no_lstm':
            self.mlp = MLP(self.cnn_fdim, mlp_dim, 'relu')
        else:
            self.mlp = MLP(v_hdim, mlp_dim, 'relu')
        self.linear = nn.Linear(self.mlp.out_dim, out_dim)
Example #15
def main_rnn(ckpt, filename):
    # Load Data for Inference
    data = np.load('./data/lpd_5_cleansed.npy')
    data = np.reshape(data, [21425, 400, -1])
    dataset = DataSet(data)

    del data
    gc.collect()

    # Model Object
    model = RNN()

    # Restore Model Variables
    saver = tf.train.Saver()
    print('Restoring model from {} \n'.format(ckpt))
    saver.restore(model.sess, ckpt)

    # Generate
    model.generate(dataset, filename)
    print("Write to midi file %s done.\n" % filename)
Example #16
    def __init__(self, cnn_feat_dim, state_dim, v_hdim=128, v_margin=10, v_net_type='lstm', v_net_param=None,
                 s_hdim=None, s_net_type='id', dynamic_v=False):
        super().__init__()
        s_hdim = state_dim if s_hdim is None else s_hdim
        self.mode = 'test'
        self.cnn_feat_dim = cnn_feat_dim
        self.state_dim = state_dim
        self.v_net_type = v_net_type
        self.v_hdim = v_hdim
        self.v_margin = v_margin
        self.s_net_type = s_net_type
        self.s_hdim = s_hdim
        self.dynamic_v = dynamic_v
        self.out_dim = v_hdim + s_hdim

        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_feat_dim, v_hdim, v_net_type, bi_dir=False)
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim
            self.v_net = TemporalConvNet(cnn_feat_dim, tcn_size, kernel_size=kernel_size, dropout=dropout, causal=True)

        if s_net_type == 'lstm':
            self.s_net = RNN(state_dim, s_hdim, s_net_type, bi_dir=False)

        self.v_out = None
        self.t = 0
        # training only
        self.indices = None
        self.s_scatter_indices = None
        self.s_gather_indices = None
        self.v_gather_indices = None
        self.cnn_feat_ctx = None
        self.num_episode = None
        self.max_episode_len = None
        self.set_mode('test')
Example #17
def main_rnn():
    # Prepare Data
    data_file = './data/lpd_5_cleansed.npy'
    data = np.load(data_file)
    print(np.shape(data))

    data = np.reshape(data, [21425, 400, -1])
    print(np.shape(data))
    print(data.dtype)

    dataset = read_data_sets(data)
    train_set = dataset.train
    develop_set = dataset.develop
    test_set = dataset.test

    # release space
    del data
    del dataset
    gc.collect()

    # Create Model Object
    model = RNN()

    # Train
    log_tag = "20180518-1030"
    model.train(train_set, develop_set, log_tag)
    plot_gif(fig_dir='./logdir/%s/results' % log_tag)  # plot

    # Test
    model.test(test_set, log_tag)
Example #18
def getModel(args):

    if args.model == "rnn":
        model = RNN(input_dim=args.input_dims,
                    nclasses=args.nclasses,
                    hidden_dims=args.hidden_dims,
                    num_rnn_layers=args.num_layers,
                    dropout=args.dropout,
                    bidirectional=True)

    if args.model == "msresnet":
        model = MSResNet(input_channel=args.input_dims,
                         layers=[1, 1, 1, 1],
                         num_classes=args.nclasses,
                         hidden_dims=args.hidden_dims)

    if args.model == "tempcnn":
        model = TempCNN(input_dim=args.input_dims,
                        nclasses=args.nclasses,
                        sequence_length=args.samplet,
                        hidden_dims=args.hidden_dims,
                        kernel_size=args.kernel_size)

    elif args.model == "transformer":

        hidden_dims = args.hidden_dims  # 256
        n_heads = args.n_heads  # 8
        n_layers = args.n_layers  # 6
        len_max_seq = args.samplet
        dropout = args.dropout
        d_inner = hidden_dims * 4

        model = TransformerEncoder(in_channels=args.input_dims,
                                   len_max_seq=len_max_seq,
                                   d_word_vec=hidden_dims,
                                   d_model=hidden_dims,
                                   d_inner=d_inner,
                                   n_layers=n_layers,
                                   n_head=n_heads,
                                   d_k=hidden_dims // n_heads,
                                   d_v=hidden_dims // n_heads,
                                   dropout=dropout,
                                   nclasses=args.nclasses)

    if torch.cuda.is_available():
        model = model.cuda()

    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print("initialized {} model ({} parameters)".format(
        args.model, pytorch_total_params))

    return model
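A minimal invocation sketch for getModel; the Namespace fields mirror the attributes the function reads for the "rnn" branch, and every value here is hypothetical:

from argparse import Namespace

args = Namespace(model='rnn', input_dims=13, nclasses=9,
                 hidden_dims=128, num_layers=2, dropout=0.3)
model = getModel(args)  # builds the bidirectional RNN classifier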
Example #19
 def __init__(self, nx, ny, nk, specs):
     super(NFDiag, self).__init__()
     self.nx = nx
     self.ny = ny
     self.nk = nk
     self.nh = nh = specs.get('nh_mlp', [300, 200])
     self.nh_rnn = nh_rnn = specs.get('nh_rnn', 128)
     self.rnn_type = rnn_type = specs.get('rnn_type', 'gru')
     self.x_birnn = x_birnn = specs.get('x_birnn', False)
     self.fix_first = fix_first = specs.get('fix_first', False)
     self.nac = nac = nk - 1 if fix_first else nk
     self.x_rnn = RNN(nx, nh_rnn, bi_dir=x_birnn, cell_type=rnn_type)
     self.mlp = MLP(nh_rnn, nh)
     self.head_A = nn.Linear(nh[-1], ny * nac)
     self.head_b = nn.Linear(nh[-1], ny * nac)
Example #20
    def processParams(self, params):
        # Directory for the experiment
        directory = self.setupDir(params)
        # Set up generator
        generator = InputGenerator(
            self.gt,
            chunk_size=params["nb_timesteps"],
            batch_size=params["batch_size"],
        )
        # Set up model
        model = RNN(params)
        model = model.model
        # Set up loss
        loss = CustomLoss(
            lambda_roll=params["lambda_roll"],
            lambda_pitch=params["lambda_pitch"],
            lambda_yaw=params["lambda_yaw"],
            lambda_thrust=params["lambda_thrust"],
            loss_func=params["loss_func"],
        )
        decay = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=2,
                                  verbose=1,
                                  min_lr=1e-6)
        # Set up optimizer
        optimizer = Adam(
            lr=params["lr"],
            decay=params["decay"],
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
        )

        # Set up callbacks
        mcp = ModelCheckpoint(
            directory + "/model.{epoch:03d}.h5",
            verbose=1,
            save_weights_only=False,
        )
        lrp = PrintLR()
        #lrs = LearningRateScheduler(step_decay)
        callbacks = [mcp, lrp, decay]
        # return all params
        return generator, model, loss, optimizer, callbacks
Example #21
    def __init__(self, out_dim, v_hdim, cnn_fdim, dtype, device, frame_num=10, camera_num=3, frame_shape=(3, 224, 224), mlp_dim=(128, 64),
                 v_net_type='lstm', v_net_param=None, bi_dir=False, training=True, is_dropout=False):
        super().__init__()
        self.out_dim = out_dim
        self.cnn_fdim = cnn_fdim
        self.v_hdim = v_hdim
        self.frame_shape = frame_shape
        self.camera_num = camera_num
        self.cnn = ResNet(cnn_fdim, running_stats=training)
        self.dtype = dtype
        self.device = device

        self.v_net_type = v_net_type
        self.v_net = RNN(cnn_fdim * 2, v_hdim, bi_dir=bi_dir)
        #self.v_net = nn.LSTM(cnn_fdim * 2, v_hdim, 2, batch_first=True, dropout=0.01, bidirectional=bi_dir)
        self.mlp = MLP(v_hdim, mlp_dim, 'leaky', is_dropout=is_dropout)
        self.linear = nn.Linear(self.mlp.out_dim, out_dim)
        self.softmax = nn.Softmax(dim=1)
Example #22
    def __init__(self, img_model, seq_model):
        super().__init__() 

        self.img_model, self.seq_model = None, None

        if img_model == "slow_fusion":
            from models.slow_fusion import SlowFusion 
            self.img_model = SlowFusion(3, 10, 64)
        elif img_model == "early_fusion": 
            from models.early_fusion import EarlyFusion
            self.img_model = EarlyFusion(3, 10, 64)
        elif img_model == "late_fusion": 
            from models.late_fusion import LateFusion
            self.img_model = LateFusion(3, 10, 64)
        elif img_model == "vanilla_cnn":
            from models.basic_cnn import BasicCNN
            self.img_model = BasicCNN(3, 64)
        else: 
            from models.imagenet_model_wrapper import ImageNet_Model_Wrapper
            self.img_model = ImageNet_Model_Wrapper(img_model)

        if seq_model == "vanilla_rnn": 
            from models.rnn import RNN
            self.seq_model = RNN(512, 256, 2)
        elif seq_model == "lstm": 
            from models.lstm import LSTM
            self.seq_model = LSTM(512, 256, num_layers=2, dropout=0.1, bidirectional=True)
        elif seq_model == "lstmn": 
            from models.lstmn import BiLSTMN
            self.seq_model = BiLSTMN(512, 256, num_layers=2, dropout=0.1, tape_depth=10)
        elif seq_model == "transformer_abs": 
            from models.transformer import Transformer 
            self.seq_model = Transformer(512, 8)
        elif seq_model == "stack_lstm": 
            from models.stack_lstm import EncoderLSTMStack
            self.seq_model = EncoderLSTMStack(512, 256)

        # attention over seq_model output
        self.query_vector = nn.Parameter(torch.randn(1, 64))
        # self.attn_w  = nn.Bilinear(64, 512, 1)
        self.attn_w = nn.Parameter(torch.randn(64, 512))

        self.linear1 = nn.Linear(512, 32)
        self.linear2 = nn.Linear(32, 1)
Example #23
    def __init__(self,
                 out_dim,
                 v_hdim,
                 cnn_fdim,
                 no_cnn=False,
                 frame_shape=(3, 224, 224),
                 mlp_dim=(300, 200),
                 cnn_type='resnet',
                 v_net_type='lstm',
                 v_net_param=None,
                 causal=False):
        super().__init__()
        self.out_dim = out_dim
        self.cnn_fdim = cnn_fdim
        self.v_hdim = v_hdim
        self.no_cnn = no_cnn
        self.frame_shape = frame_shape
        if no_cnn:
            self.cnn = None
        elif cnn_type == 'resnet':
            self.cnn = ResNet(cnn_fdim)
        elif cnn_type == 'mobile':
            self.cnn = MobileNet(cnn_fdim)

        self.v_net_type = v_net_type
        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_fdim, v_hdim, v_net_type, bi_dir=not causal)
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim
            self.v_net = TemporalConvNet(cnn_fdim,
                                         tcn_size,
                                         kernel_size=kernel_size,
                                         dropout=dropout,
                                         causal=causal)
        self.mlp = MLP(v_hdim, mlp_dim, 'relu')
        self.linear = nn.Linear(self.mlp.out_dim, out_dim)
Example #24
 def __init__(self,
              cnn_feat_dim,
              v_hdim=128,
              v_margin=10,
              v_net_type='lstm',
              v_net_param=None,
              causal=False):
     super().__init__()
     self.mode = 'test'
     self.cnn_feat_dim = cnn_feat_dim
     self.v_net_type = v_net_type
     self.v_hdim = v_hdim
     self.v_margin = v_margin
     if v_net_type == 'lstm':
         self.v_net = RNN(cnn_feat_dim,
                          v_hdim,
                          v_net_type,
                          bi_dir=not causal)
     elif v_net_type == 'tcn':
         if v_net_param is None:
             v_net_param = {}
         tcn_size = v_net_param.get('size', [64, 128])
         dropout = v_net_param.get('dropout', 0.2)
         kernel_size = v_net_param.get('kernel_size', 3)
         assert tcn_size[-1] == v_hdim
         self.v_net = TemporalConvNet(cnn_feat_dim,
                                      tcn_size,
                                      kernel_size=kernel_size,
                                      dropout=dropout,
                                      causal=causal)
     self.v_out = None
     self.t = 0
     # training only
     self.indices = None
     self.scatter_indices = None
     self.gather_indices = None
     self.cnn_feat_ctx = None
Example #25
    def __init__(self, config):
        super(MultiModalCore, self).__init__()
        self.config = config
        self.v_dim = config.v_dim
        self.q_emb_dim = config.q_emb_dim
        self.mmc_sizes = config.mmc_sizes
        self.mmc_layers = []
        self.input_dropout = nn.Dropout(p=config.input_dropout)

        # Create MLP with early fusion in the first layer followed by batch norm
        for mmc_ix in range(len(config.mmc_sizes)):
            if mmc_ix == 0:
                if config.disable_early_fusion:
                    in_s = self.v_dim
                else:
                    in_s = self.v_dim + self.q_emb_dim
                self.batch_norm_fusion = nn.BatchNorm1d(in_s)
            else:
                in_s = config.mmc_sizes[mmc_ix - 1]
            out_s = config.mmc_sizes[mmc_ix]
            # lin = nn.Linear(in_s, out_s)
            lin = LinearWithDropout(in_s, out_s, dropout_p=config.mmc_dropout)
            self.mmc_layers.append(lin)
            nonlin = getattr(nonlinearity, config.mmc_nonlinearity)()
            self.mmc_layers.append(nonlin)

        self.mmc_layers = nn.ModuleList(self.mmc_layers)
        self.batch_norm_mmc = nn.BatchNorm1d(self.mmc_sizes[-1])

        # Aggregation
        if not self.config.disable_late_fusion:
            out_s += config.q_emb_dim
            if not self.config.disable_batch_norm_for_late_fusion:
                self.batch_norm_before_aggregation = nn.BatchNorm1d(out_s)
        self.aggregator = RNN(out_s, config.mmc_aggregator_dim, nlayers=config.mmc_aggregator_layers,
                              bidirect=True)
Example #26
 output_len = 12
 if args.model_type == 'cnn':
     input_len = next(iter(trainer.train_dataloader))[0].shape[1]
     num_input_channels = next(iter(trainer.train_dataloader))[0].shape[2]
     encoder_params = CNN.generate_params()
     decoder_params = MLP.generate_params()
     model = CNN(
         num_input_channels=num_input_channels,
         input_size=input_len,
         output_len=output_len,
         encoder_params=encoder_params,
         decoder_params=decoder_params,
     )
 elif args.model_type == 'rnn':
     num_input_channels = next(iter(trainer.train_dataloader))[0].shape[2]
     rnn_params = RNN.generate_params()
     model = RNN(output_len=output_len,
                 num_input_channels=num_input_channels,
                 rnn_params=rnn_params)
 elif args.model_type == 'crnn':
     input_len = next(iter(trainer.train_dataloader))[0].shape[1]
     num_input_channels = next(iter(trainer.train_dataloader))[0].shape[2]
     encoder_params = CNN.generate_params()
     rnn_params = RNN.generate_params()
     model = CRNN(input_len=input_len,
                  output_len=output_len,
                  num_input_channels=num_input_channels,
                  encoder_params=encoder_params,
                  rnn_params=rnn_params)
 elif args.model_type == 'mlp':
     input_len = reduce(lambda x, y: x * y,
Example #27
def active_train():
    init_learning_rate = params["LEARNING_RATE"]
    init_selection_size = params["SELECTION_SIZE"]

    init_data = {}
    init_data["train_y"] = copy.deepcopy(data["train_y"])
    init_data["train_x"] = copy.deepcopy(data["train_x"])

    average_accs = {}
    average_losses = {}

    if params["MODEL"] == "cnn":
        model = CNN()
    elif params["MODEL"] == "rnn":
        model = RNN(params, data)
    else:
        model = CNN(data, params)

    if params["CUDA"]:
        model.cuda()

    models["CLASSIFIER"] = model

    for j in range(params["N_AVERAGE"]):
        params["LEARNING_RATE"] = init_learning_rate
        params["SELECTION_SIZE"] = init_selection_size

        data["train_x"]  = copy.deepcopy(init_data["train_x"])
        data["train_y"]  = copy.deepcopy(init_data["train_y"])

        lg = None
        if params["LOG"]:
            lg = global_logger["lg"]

            start_accuracy = 100 / params["CLASS_SIZE"]
            lg.scalar_summary("test-acc", start_accuracy, 0)
            lg.scalar_summary("test-acc-avg", start_accuracy, 0)

        print("-" * 20, "Round {}".format(j + 1), "-" * 20)
        model.init_model()
        train_features = []
        train_targets = []
        distribution = {}

        for key in range(len(data["classes"])):
            distribution[key] = []

        data["train_x"], data["train_y"] = shuffle(data["train_x"], data["train_y"])

        if 500 % params["SELECTION_SIZE"] == 0:
            n_rounds = int(500 / params["SELECTION_SIZE"])
            last_selection_size = params["SELECTION_SIZE"]
        else:
            n_rounds = int(500 / params["SELECTION_SIZE"]) + 1
            last_selection_size = 500 % params["SELECTION_SIZE"]

        for i in range(n_rounds):
            if (n_rounds - 1 == i):
                params["SELECTION_SIZE"] = last_selection_size

            if params["SCORE_FN"] == "all":
                t1, t2 = select_all(model, lg, i)
            elif params["SCORE_FN"] == "entropy":
                t1, t2 = select_entropy(model, lg, i)
            elif params["SCORE_FN"] == "egl":
                t1, t2 = select_egl(model, lg, i)
            elif params["SCORE_FN"] == "random":
                t1, t2 = select_random(model, lg, i)

            train_features.extend(t1)
            train_targets.extend(t2)

            print("\n")
            model.init_model()
            model = train(model, train_features, train_targets)
            accuracy, loss, corrects, size = evaluate(model, i, mode="test")
            print("{:10s} loss: {:10.6f} acc: {:10.4f}%({}/{}) \n".format("test",
                                                                          loss, accuracy, corrects, size))
            if i not in average_accs:
                average_accs[i] = [accuracy]
            else:
                average_accs[i].append(accuracy)

            if i not in average_losses:
                average_losses[i] = [loss]

            else:
                average_losses[i].append(loss)

            if params["LOG"]:
                lg.scalar_summary("test-acc", accuracy, len(train_features))
                lg.scalar_summary(
                    "test-acc-avg", sum(average_accs[i]) / len(average_accs[i]), len(train_features))

                lg.scalar_summary("test-loss", loss, len(train_features))
                lg.scalar_summary(
                    "test-loss-avg", sum(average_losses[i]) / len(average_losses[i]), len(train_features))

                for each in range(len(data["classes"])):
                    val = train_targets.count(each) / len(train_targets)
                    distribution[each].append(val)

                # count number of positives and negatives added to the labeled pool
                # nameOfFile = '{}/distribution{}.html'.format(lg.log_dir, j)

    best_model = {}
    return best_model
Example #28
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--save',
                        '-s',
                        type=str,
                        default='',
                        help='save name to test')
    args = parser.parse_args()

    save = args.save
    # Prepare logger
    # - create logger with 'spam_application'
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    # - create file handler which logs even debug messages
    fh = logging.FileHandler('test_result/{}.log'.format(save.split('/')[1]),
                             mode='w')
    fh.setLevel(logging.INFO)
    # - create formatter and add it to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    # - add the handlers to the logger
    logger.addHandler(fh)

    # Parse save name
    hp_dict = parse_hyparams(save.split('/')[1])

    # Prepare datasets
    train, dev, test = reader.read(hp_dict['x'], hp_dict['y'])

    # Prepare linearizer and converter
    global l, c

    # Model setup
    if hp_dict['r'] == 0:
        l = linearizer.linearize_tree
        c = converter.convert
        params = {
            'n_embed': hp_dict['x'] + 3,
            'd': hp_dict['d'],
            'k': hp_dict['k']
        }
        if hp_dict['m'] == 'n':
            model = RNN(**params)
        elif hp_dict['m'] == 't':
            model = RNTN(**params)
        elif hp_dict['m'] == 'd':
            model = RNTNd(**params)
        elif hp_dict['m'] == 'c':
            model = RNTNc(**params)

    elif hp_dict['r'] == 1:
        l = linearizer.linearize_tree_relational
        c = converter.convert_relational
        params = {
            'n_embed': hp_dict['x'] * 2,
            'd': hp_dict['d'],
            'k': hp_dict['k']
        }
        if hp_dict['m'] == 'rn':
            model = RNNr(**params)
        elif hp_dict['m'] == 'rt':
            params['mp'] = hp_dict['v']
            model = RNTNr(**params)
        elif hp_dict['m'] == 'rd':
            params['mp'] = hp_dict['v']
            model = RNTNrd(**params)
        elif hp_dict['m'] == 'rc':
            params['mp'] = hp_dict['v']
            model = RNTNrc(**params)
        elif hp_dict['m'] == 'rs':
            model = RNTNrs(**params, p=hp_dict['p'], q=hp_dict['q'])

    print(save)
    serializers.load_hdf5(save, model)
    model = L.Classifier(model)
    model.to_cpu()

    # Dev
    logger.info('---dev---')
    accuracy = get_accuracy(model, dev)
    logger.info('dev: {}'.format(accuracy))

    # Test
    logger.info('---test---')
    for length, a_bin in enumerate(test[1:]):
        accuracy = get_accuracy(model, a_bin)
        logger.info('test {} : {}'.format(length + 1, accuracy))
Example #29
def main():
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--save', '-s', type=str, default='',
                        help='save name to test')
    args = parser.parse_args()
    """
    # Parse save file name
    path = './trained_model_12-11'
    saves = os.listdir(path)
    for save in saves:
        # Prepare logger
        # - create logger with 'spam_application'
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO)
        # - create file handler which logs even debug messages
        fh = logging.FileHandler('test_result/{}.log'.format(save), mode='w')
        fh.setLevel(logging.INFO)
        # - create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        # - add the handlers to the logger
        logger.addHandler(fh)

        # Parse save name
        hp_dict = parse_hyparams(save)

        # Prepare datasets
        train, dev, test = reader.read(hp_dict['x'], hp_dict['y'])

        # Prepare linearizer and converter
        global l, c

        # Model setup
        if hp_dict['r'] == 0:
            l = linearizer.linearize_tree
            c = converter.convert
            params = {'n_embed': hp_dict['x'] + 3, 'd': hp_dict['d'], 'k': hp_dict['k']}
            if hp_dict['m'] == 'n':
                model = RNN(**params)
            elif hp_dict['m'] == 't':
                model = RNTN(**params)
            elif hp_dict['m'] == 'd':
                model = RNTNd(**params)
            elif hp_dict['m'] == 'c':
                model = RNTNc(**params)

        elif hp_dict['r'] == 1:
            l = linearizer.linearize_tree_relational
            c = converter.convert_relational
            params = {'n_embed': hp_dict['x'] * 2, 'd': hp_dict['d'], 'k': hp_dict['k']}
            if hp_dict['m'] == 'rn':
                model = RNNr(**params)
            elif hp_dict['m'] == 'rt':
                model = RNTNr(**params)
            elif hp_dict['m'] == 'rd':
                model = RNTNrd(**params)
            elif hp_dict['m'] == 'rc':
                model = RNTNrc(**params)
            elif hp_dict['m'] == 'rs':
                model = RNTNrs(**params, p=hp_dict['p'], q=hp_dict['q'])

        print(save)
        serializers.load_hdf5(path + '/' + save, model)
        model = L.Classifier(model)
        model.to_cpu()

        m = hp_dict['m']
        w = hp_dict['w']
        i = hp_dict['i'] - 1
        p = hp_dict['p']

        # Dev
        logger.info('---dev---')
        accuracy = get_accuracy(model, dev)
        logger.info('dev: {}'.format(accuracy))
        if m == 'rs':
            results[p][w][0][i] = accuracy
        else:
            results[m][w][0][i] = accuracy

        # Test
        logger.info('---test---')
        for length, a_bin in enumerate(test[1:]):
            accuracy = get_accuracy(model, a_bin)
            logger.info('test {} : {}'.format(length + 1, accuracy))
            if hp_dict['m'] == 'rs':
                results[p][w][length + 1][i] = accuracy
            else:
                results[m][w][length + 1][i] = accuracy

        logger.removeHandler(fh)
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler('test_result/final.log', mode='w')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    for m in results.keys():
        logger.info('')
        logger.info('m: {}'.format(m))
        for w in results[m].keys():
            logger.info('w: {}---------'.format(w))
            for j, accuracies in enumerate(results[m][w]):
                print(accuracies)
                std = np.std(accuracies)
                mean = np.mean(accuracies)
                if j == 0:
                    logger.info('dev: {} ({})'.format(mean, std))
                else:
                    logger.info('test {} : {} ({})'.format(j, mean, std))

    with open('final.p', 'wb') as f:
        pickle.dump(results, f)
Example #30
results_dir = dir + '/results'
rnn_dir = dir + '/' + args.model_dir

config = vars(args)
config['log_dir'] = rnn_dir
config['restore_embedding'] = False
config['seq_length'] = None
input_words = clean_text(config['inputs'])
# if args.use_server is True:
#     with open('clusterSpec.json') as f:
#         clusterSpec = json.load(f)
#     config['target'] = 'grpc://' + clusterSpec['server'][0]
#     pass

rnn = RNN(config)
y = rnn.predict(input_words, config)
print('__BBB_START__')  # Marker for the Regexp used in the App, do not remove
output = json.dumps({
    'config': {
        'inputs': args.inputs,
        'random': args.random,
        'temperature': args.temperature,
        'top_k': args.top_k,
        'nb_word': args.nb_word,
        'nb_sentence': args.nb_sentence,
        'nb_para': args.nb_para,
    },
    'output': ' '.join(y)
})
print(output)