Example #1
def easy_activity_plot(model_dir, rule):
    """A simple plot of neural activity from one task.

    Args:
        model_dir: directory where model file is saved
        rule: string, the rule to plot
    """

    model = Model(model_dir)
    hp = model.hp

    with tf.Session() as sess:
        model.restore()

        trial = generate_trials(rule, hp, mode='test')
        feed_dict = tools.gen_feed_dict(model, trial, hp)
        h, y_hat = sess.run([model.h, model.y_hat], feed_dict=feed_dict)
        # All matrices have shape (n_time, n_condition, n_neuron)

    # Take only the one example trial
    i_trial = 0

    for activity, title in zip([trial.x, h, y_hat],
                               ['input', 'recurrent', 'output']):
        plt.figure()
        plt.imshow(activity[:, i_trial, :].T,
                   aspect='auto',
                   cmap='hot',
                   interpolation='none',
                   origin='lower')
        plt.title(title)
        plt.colorbar()
        plt.show()
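A minimal usage sketch (hypothetical model directory; it assumes easy_activity_plot is importable or defined in the current session, that a model trained with this codebase is saved at that path, and 'contextdm1' is one of the rules used in the examples below):

# Hypothetical path and rule; replace with your own trained model and task.
easy_activity_plot('checkpoints/my_model', 'contextdm1')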
Example #2
def run_network_replacerule(model_dir, rule, replace_rule, rule_strength):
    """Run the network but with replaced rule input weights.

    Args:
        model_dir: model directory
        rule: the rule to test on
        replace_rule: a list of rule input units to use
        rule_strength: the relative strength of each replace rule unit
    """
    model = Model(model_dir)
    hp = model.hp
    with tf.Session() as sess:
        model.restore()

        # Get performance
        batch_size_test = 1000
        n_rep = 20
        batch_size_test_rep = int(batch_size_test / n_rep)
        perf_rep = list()
        for i_rep in range(n_rep):
            trial = generate_trials(rule,
                                    hp,
                                    'random',
                                    batch_size=batch_size_test_rep,
                                    replace_rule=replace_rule,
                                    rule_strength=rule_strength)
            feed_dict = tools.gen_feed_dict(model, trial, hp)
            y_hat_test = sess.run(model.y_hat, feed_dict=feed_dict)

            perf_rep.append(np.mean(get_perf(y_hat_test, trial.y_loc)))

    return np.mean(perf_rep), rule_strength
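A minimal usage sketch (hypothetical directory and values; per the docstring above, rule_strength is assumed to carry one entry per rule listed in replace_rule):

# Hypothetical: test 'contextdm1' while mixing two rule inputs at half strength each.
perf, strength = run_network_replacerule(
    'checkpoints/my_model', 'contextdm1',
    replace_rule=['contextdm1', 'contextdm2'],
    rule_strength=[0.5, 0.5])
print('Mean performance: {:.3f}'.format(perf))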
Example #3
def easy_connectivity_plot(model_dir):
    """A simple plot of network connectivity."""

    model = Model(model_dir)
    with tf.Session() as sess:
        model.restore()
        # get all connection weights and biases as tensorflow variables
        var_list = model.var_list
        # evaluate the parameters after training
        params = [sess.run(var) for var in var_list]
        # get name of each variable
        names = [var.name for var in var_list]

    # Plot weights
    for param, name in zip(params, names):
        if len(param.shape) != 2:
            continue

        vmax = np.max(abs(param)) * 0.7
        plt.figure()
        # notice the transpose
        plt.imshow(param.T,
                   aspect='auto',
                   cmap='bwr',
                   vmin=-vmax,
                   vmax=vmax,
                   interpolation='none',
                   origin='lower')
        plt.title(name)
        plt.colorbar()
        plt.xlabel('From')
        plt.ylabel('To')
        plt.show()
Example #4
    def __init__(self, model_dir, rules=None):
        """Initialization.

        Args:
            model_dir: str, model directory
            rules: None or a list of rules
        """
        # Stimulus-averaged traces
        h_stimavg_byrule = OrderedDict()
        h_stimavg_byepoch = OrderedDict()
        # Last time points of epochs
        h_lastt_byepoch = OrderedDict()

        model = Model(model_dir)
        hp = model.hp

        if rules is None:
            # Default value
            rules = hp['rules']
        n_rules = len(rules)

        with tf.Session() as sess:
            model.restore()

            for rule in rules:
                trial = generate_trials(rule=rule, hp=hp, mode='test')
                feed_dict = tools.gen_feed_dict(model, trial, hp)
                h = sess.run(model.h, feed_dict=feed_dict)

                # Average across stimulus conditions
                h_stimavg = h.mean(axis=1)

                # dt_new = 50
                # every_t = int(dt_new/hp['dt'])

                t_start = int(
                    500 / hp['dt'])  # Important: Ignore the initial transition
                # Average across stimulus conditions
                h_stimavg_byrule[rule] = h_stimavg[t_start:, :]

                for e_name, e_time in trial.epochs.items():
                    if 'fix' in e_name:
                        continue

                    # if ('fix' not in e_name) and ('go' not in e_name):
                    # Take epoch
                    e_time_start = e_time[0] - 1 if e_time[0] > 0 else 0
                    h_stimavg_byepoch[(
                        rule, e_name)] = h_stimavg[e_time_start:e_time[1], :]
                    # Take last time point from epoch
                    # h_all_byepoch[(rule, e_name)] = np.mean(h[e_time[0]:e_time[1],:,:][-1], axis=1)
                    h_lastt_byepoch[(rule, e_name)] = h[e_time[1], :, :]

        self.rules = rules
        self.h_stimavg_byrule = h_stimavg_byrule
        self.h_stimavg_byepoch = h_stimavg_byepoch
        self.h_lastt_byepoch = h_lastt_byepoch
        self.model_dir = model_dir
Example #5
def run_experiments():
    experiments_hyper_params = json.load(open(hyper_params_path))
    for params in experiments_hyper_params:
        name = params.pop('name')
        layers_dims = params.pop('layers')
        layers_dims[-2]['num_nodes'] = NUM_CATEGORIES

        model = Model(X_train.shape[1], layers_dims, model_name=name)
        model.fit(X_train, X_val, y_train, y_val, **params)
        _predict_test(model, name)
Example #6
def run_simulation(save_name, setting):
    '''Generate simulation data for all trials'''
    tf.reset_default_graph()
    model = Model(save_name, sigma_rec=setting['sigma_rec'], dt=10)

    with tf.Session() as sess:
        model.restore(sess)
        Data = _run_simulation(model, setting)

    return Data
Example #7
def _compute_variance(model_dir, rules=None, random_rotation=False):
    """Compute variance for all tasks.

    Args:
        model_dir: str, the path of the model directory
        rules: list of rules to compute variance, list of strings
        random_rotation: boolean. If True, rotate the neural activity.
    """
    model = Model(model_dir, sigma_rec=0)
    with tf.Session() as sess:
        model.restore()
        _compute_variance_bymodel(model, sess, rules, random_rotation)
Example #8
def synthesis(text, num):
    m = Model()
    # m_post = ModelPostNet()

    m.load_state_dict(load_checkpoint(num, "transformer"))
    # m_post.load_state_dict(load_checkpoint(args.restore_step2, "postnet"))

    text = np.asarray(text_to_sequence(text, [hp.cleaners]))
    text = t.LongTensor(text).unsqueeze(0)
    text = text.cuda()
    mel_input = t.zeros([1, 1, 80]).cuda()
    pos_text = t.arange(1, text.size(1) + 1).unsqueeze(0)
    pos_text = pos_text.cuda()

    m = m.cuda()
    # m_post = m_post.cuda()
    m.train(False)
    # m_post.train(False)

    # pbar = tqdm(range(args.max_len))
    with t.no_grad():
        for _ in range(1000):
            pos_mel = t.arange(1, mel_input.size(1) + 1).unsqueeze(0).cuda()
            mel_pred, postnet_pred, attn, stop_token, _, attn_dec = m.forward(
                text, mel_input, pos_text, pos_mel)
            mel_input = t.cat([mel_input, postnet_pred[:, -1:, :]], dim=1)

        # mag_pred = m_post.forward(postnet_pred)

    # wav = spectrogram2wav(mag_pred.squeeze(0).cpu().numpy())
    mel_postnet = postnet_pred[0].cpu().numpy().T
    plot_data([mel_postnet for _ in range(2)])
    wav = audio.inv_mel_spectrogram(mel_postnet)
    wav = wav[0:audio.find_endpoint(wav)]
    audio.save_wav(wav, "result.wav")
Example #9
def test(path):
    model = Model()
    model.to("cuda:0")
    model.eval()
    checkpoint = torch.load("./model.pth")
    model.load_state_dict(checkpoint["model"])
    img = np.array(Image.open(path).resize([448, 448]))[np.newaxis]
    img = np.transpose(img, axes=[0, 3, 1, 2]) / 255
    img = torch.tensor(img, dtype=torch.float32).to("cuda:0")
    preds = model(img).cpu().detach().numpy()
    cell_h, cell_w = IMG_H / S, IMG_W / S
    x, y = np.meshgrid(range(S), range(S))
    preds_xywhs = []
    for i in range(B):
        preds_x = (preds[0, :, :, i * 4] + x) * cell_w
        preds_y = (preds[0, :, :, i * 4 + 1] + y) * cell_h
        preds_w = preds[0, :, :, i * 4 + 2] * IMG_W
        preds_h = preds[0, :, :, i * 4 + 3] * IMG_H
        preds_xywh = np.dstack((preds_x, preds_y, preds_w, preds_h))
        preds_xywhs.append(preds_xywh)
    preds_xywhs = np.dstack(preds_xywhs)
    preds_xywhs = np.reshape(preds_xywhs, [-1, 4])
    preds_class = preds[0, :, :, 10:]
    preds_class = np.reshape(preds_class, [-1, 20])
    preds_c = preds[0, :, :, 8:10]
    preds_c = np.reshape(preds_c, [-1, 1])
    max_arg = np.argmax(preds_c, axis=0)
    print("max confidence: %f" % (preds_c[max_arg]))
    max_arg_ = np.argmax(preds_class[int(max_arg // 2)])
    print("class confidence: %f" % (preds_class[max_arg // 2, max_arg_]))
    print("class category: %s" % (CLASSES[int(max_arg_)]))
    Image.fromarray(
        np.uint8(
            draw_bboxes(np.array(Image.open(path).resize([448, 448])),
                        preds_xywhs[max_arg[0]:max_arg[0] + 1]))).show()
Example #10
def synthesis(text, args):
    m = Model()
    m_post = ModelPostNet()

    m.load_state_dict(load_checkpoint(args.restore_step1, "transformer"))
    m_post.load_state_dict(load_checkpoint(args.restore_step2, "postnet"))

    text = np.asarray(text_to_sequence(text, [hp.cleaners]))
    text = t.LongTensor(text).unsqueeze(0)
    text = text.cuda()
    mel_input = t.zeros([1, 1, 80]).cuda()
    pos_text = t.arange(1, text.size(1) + 1).unsqueeze(0)
    pos_text = pos_text.cuda()

    m = m.cuda()
    m_post = m_post.cuda()
    m.train(False)
    m_post.train(False)

    pbar = tqdm(range(args.max_len))
    with t.no_grad():
        for i in pbar:
            pos_mel = t.arange(1, mel_input.size(1) + 1).unsqueeze(0).cuda()
            mel_pred, postnet_pred, attn, stop_token, _, attn_dec = m.forward(
                text, mel_input, pos_text, pos_mel)
            mel_input = t.cat([t.zeros([1, 1, 80]).cuda(), postnet_pred],
                              dim=1)

        mag_pred = m_post.forward(postnet_pred)
        
    wav = spectrogram2wav(mag_pred.squeeze(0).cpu().numpy())
    write(hp.sample_path + "/test.wav", hp.sr, wav)
Example #11
def quick_statespace(model_dir):
    """Quick state space analysis using simply PCA."""
    rules = ['contextdm1', 'contextdm2']
    h_lastts = dict()
    model = Model(model_dir)
    hp = model.hp
    with tf.Session() as sess:
        model.restore()
        for rule in rules:
            # Generate a batch of trials in test mode
            trial = generate_trials(rule, hp, mode='test')
            feed_dict = tools.gen_feed_dict(model, trial, hp)
            h = sess.run(model.h, feed_dict=feed_dict)
            lastt = trial.epochs['stim1'][-1]
            h_lastts[rule] = h[lastt, :, :]

    from sklearn.decomposition import PCA
    model = PCA(n_components=5)
    # dict.values() must be wrapped in list() for np.concatenate under Python 3
    model.fit(np.concatenate(list(h_lastts.values()), axis=0))
    fig = plt.figure(figsize=(2, 2))
    ax = fig.add_axes([.3, .3, .6, .6])
    for rule, color in zip(rules, ['red', 'blue']):
        data_trans = model.transform(h_lastts[rule])
        ax.scatter(data_trans[:, 0], data_trans[:, 1], s=1,
                   label=rule_name[rule], color=color)
    plt.tick_params(axis='both', which='major', labelsize=7)
    ax.set_xlabel('PC 1', fontsize=7)
    ax.set_ylabel('PC 2', fontsize=7)
    lg = ax.legend(fontsize=7, ncol=1, bbox_to_anchor=(1, 0.3),
                   loc=1, frameon=False)
    if save:
        plt.savefig('figure/choiceatt_quickstatespace.pdf', transparent=True)
Example #12
def en_ave(mod1, mod2, mod3, mod4, X_test, y_test, state):
    print('=> load average ensemble')
    mod = Model().to(device)
    for p, p1, p2, p3, p4 in zip(mod.parameters(), mod1.parameters(),
                                 mod2.parameters(), mod3.parameters(),
                                 mod4.parameters()):
        p.data.copy_(
            p1.data.mul(0.25).add(p2.data.mul(0.25)).add(
                p3.data.mul(0.25)).add(p4.data.mul(0.25)))
    acc = test_without_dropout(X_test, y_test, mod, device)
    path = os.path.join('exp', 'ensemble.') + str(
        state['itr']) + 'epoch.' + str(state['acq']) + 'acq.pth.tar'
    state['rep'] = path
    torch.save({'model_state_dict': mod.state_dict()}, state['rep'])
    return mod, acc
Example #13
def synthesis(text, args, num):
    m = Model()
    m_post = ModelPostNet()

    m.load_state_dict(load_checkpoint(args.restore_step1, "transformer"))
    m_post.load_state_dict(load_checkpoint(args.restore_step2, "postnet"))

    text = np.asarray(text_to_sequence(text, [hp.cleaners]))
    text = t.LongTensor(text).unsqueeze(0)
    text = text.cuda()
    mel_input = t.zeros([1, 1, 80]).cuda()
    pos_text = t.arange(1, text.size(1) + 1).unsqueeze(0)
    pos_text = pos_text.cuda()

    m = m.cuda()
    m_post = m_post.cuda()
    m.train(False)
    m_post.train(False)

    pbar = tqdm(range(args.max_len))
    with t.no_grad():
        for i in pbar:
            pos_mel = t.arange(1, mel_input.size(1) + 1).unsqueeze(0).cuda()
            mel_pred, postnet_pred, attn, stop_token, _, attn_dec = m.forward(
                text, mel_input, pos_text, pos_mel)
            # print('mel_pred==================',mel_pred.shape)
            # print('postnet_pred==================', postnet_pred.shape)
            mel_input = t.cat([mel_input, postnet_pred[:, -1:, :]], dim=1)
            #print(postnet_pred[:, -1:, :])
            #print(t.argmax(attn[1][1][i]).item())
            #print('mel_input==================', mel_input.shape)

    # # Test the postnet directly on a ground-truth mel spectrogram
    #aa = t.from_numpy(np.load('D:\SSHdownload\\000101.pt.npy')).cuda().unsqueeze(0)
    # # print(aa.shape)
    mag_pred = m_post.forward(postnet_pred)
    #real_mag = t.from_numpy((np.load('D:\SSHdownload\\003009.mag.npy'))).cuda().unsqueeze(dim=0)
    #wav = spectrogram2wav(postnet_pred)

    #print('shappe============',attn[2][0].shape)
    # count = 0
    # for j in range(4):
    #     count += 1
    #     attn1 = attn[0][j].cpu()
    #     plot_alignment(attn1, path='./training_loss/'+ str(args.restore_step1)+'_'+str(count)+'_'+'S'+str(num)+'.png', title='sentence'+str(num))

    attn1 = attn[0][1].cpu()
    plot_alignment(attn1,
                   path='./training_loss/' + str(args.restore_step1) + '_' +
                   'S' + str(num) + '.png',
                   title='sentence' + str(num))

    wav = spectrogram2wav(mag_pred.squeeze(0).cpu().detach().numpy())
    write(
        hp.sample_path + '/' + str(args.restore_step1) + '-' + "test" +
        str(num) + ".wav", hp.sr, wav)
Example #14
def synthesis(text, args):
    m = Model()
    m_post = ModelPostNet()

    m.load_state_dict(load_checkpoint(args.step1, "transformer"))
    m_post.load_state_dict(load_checkpoint(args.step2, "postnet"))

    text = np.asarray(text_to_sequence(text, [hp.cleaners]))
    text = torch.LongTensor(text).unsqueeze(0)
    text = text.cuda()

    mel_input = np.load('3_0.pt.npy')

    pos_text = torch.arange(1, text.size(1) + 1).unsqueeze(0)
    pos_text = pos_text.cuda()

    m = m.cuda()
    m_post = m_post.cuda()
    m.train(False)
    m_post.train(False)

    with torch.no_grad():
        mag_pred = m_post.forward(
            torch.from_numpy(mel_input).unsqueeze(0).cuda())

    wav = spectrogram2wav(mag_pred.squeeze(0).cpu().numpy())
    write(hp.sample_path + "/test.wav", hp.sr, wav)
Example #15
def synthesis(text, args):
    m = Model()
    m.load_state_dict(load_checkpoint(args.restore_path))
    print("[%s][%s] Synthesizing:" % (args.lang, args.spk), text)

    text = np.asarray([1] + list(text.encode('utf-8')) + [2])
    text = t.LongTensor(text).unsqueeze(0)
    text = text
    mel_input = t.zeros([1, 1, 80])
    pos_text = t.arange(1, text.size(1) + 1).unsqueeze(0)
    pos_text = pos_text
    lang_to_id = json.load(open(os.path.join(args.data_path, 'lang_id.json')))
    spk_to_id = json.load(open(os.path.join(args.data_path, 'spk_id.json')))
    lang_id = lang_to_id[args.lang]
    spk_id = spk_to_id[args.spk]

    lang_id = t.LongTensor([lang_id])
    spk_id = t.LongTensor([spk_id])
    m.train(False)
    pbar = tqdm(range(args.max_len))
    with t.no_grad():
        for i in pbar:
            pos_mel = t.arange(1, mel_input.size(1) + 1).unsqueeze(0)
            mel_pred, postnet_pred, attn, stop_token, _, attn_dec = \
                m.forward(text, mel_input, pos_text, pos_mel, lang_id, spk_id)
            mel_input = t.cat([mel_input, mel_pred[:, -1:, :]], dim=1)
            if stop_token[:, -1].item() > 0:
                break

    mel = postnet_pred.squeeze(0).cpu().numpy()
    wav = mel2wav(mel)
    np.save(args.out_path + "_mel.npy", mel)
    write(args.out_path + ".wav", hp.sr, wav)
    plot_mel(args.out_path + "_mel.png", mel)
    plot_attn(attn, args.out_path + '_align.png')
Example #16
def get_model(num):
    model = nn.DataParallel(Model()).to(device)
    checkpoint = torch.load(
        os.path.join(hp.checkpoint_path, 'checkpoint_%d.pth.tar' % num))
    model.load_state_dict(checkpoint['model'])
    _ = model.eval()
    print("Model Have Been Loaded.")

    return model
Example #17
    def test_network(self):
        input_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((300, 300)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.realset = TestNetwork.RandomData(n_images=10,
                                              n_classes=7,
                                              input_transform=input_transform)
        self.model = Model(n_classes=7)
        self.model.eval()
        all_features, all_outputs, all_preds, all_labels = predict(
            self.model, self.realset, batch_size=4, n_classes=7, GPUs=None)
        recall = np.sum(all_preds == all_labels) / float(len(self.realset))
        ap = AP(all_outputs, all_labels)
        mean_ap = meanAP(all_outputs, all_labels)
        self.assertGreaterEqual(mean_ap, 0)
        self.assertLessEqual(mean_ap, 1)
Example #18
    def compute_H(self,
                  rules=None,
                  trial_list=None,
                  recompute=False):
        
        if rules is not None:
            self.rules = rules
        else:
            self.rules = self.hp['rule_trains']
        
        if trial_list is not None:
            self.trial_list = trial_list
        else:
            self.trial_list = self.log['trials']

        self.in_loc = dict()
        self.in_loc_set = dict()
        self.epoch_info = dict()

        trial_store = dict()
        #self.trial_store = dict()########################## do we really need self.?
        print("Epoch information:")
        for rule in self.rules:
            trial_store[rule] = generate_trials(rule, self.hp, 'test', noise_on=False)
            self.in_loc[rule] = np.array([np.argmax(i) for i in trial_store[rule].input_loc])
            self.in_loc_set[rule] = sorted(set(self.in_loc[rule]))
            self.epoch_info[rule] = trial_store[rule].epochs
            #self.trial_store[rule] = generate_trials(rule, self.hp, 'test', noise_on=False)
            #self.in_loc[rule] = np.array([np.argmax(i) for i in self.trial_store[rule].input_loc])
            print('\t'+rule+':')
            for e_name, e_time in self.epoch_info[rule].items():
                print('\t\t'+e_name+':',e_time)
        
        for trial_num in self.trial_list:
            sub_dir = self.model_dir+'/'+str(trial_num)+'/'
            for rule in self.rules:
                if recompute or not os.path.exists(sub_dir+'H_'+rule+'.pkl'):
                    model = Model(sub_dir, hp=self.hp)
                    with tf.Session() as sess:
                        model.restore()
                        self._compute_H(model, rule, trial_store[rule], sess,)
Example #19
    def __init__(self, batch_loader):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        optimizer = tf.train.GradientDescentOptimizer(args.lr)
        self.batch_loader = batch_loader
        self.sess = tf.Session(config=config)
        with tf.variable_scope(args.model, reuse=tf.AUTO_REUSE):
            self.model = Model(self.batch_loader, args, keep_prob=0.5)
            # self._loss, self.train_logit = self.model.cnn_att(keep_prob=0.5)
            self.model.cnn_att()
            self.model.training = False
            self.test_logit = self.model.logit
            self.model.training = True
            grads = optimizer.compute_gradients(self.model.loss)
            self.train_op = optimizer.apply_gradients(grads)
            tf.add_to_collection("loss", self.model.loss)
            tf.add_to_collection("train_logit", self.model.logit)
            summary_writer = tf.summary.FileWriter(args.summary_dir,
                                                   self.sess.graph)
            self.saver = tf.train.Saver(max_to_keep=None)
            self.sess.run(tf.global_variables_initializer())
Example #20
def _psychometric_dm(model_dir, rule, params_list, batch_shape):
    """Base function for computing psychometric performance in 2AFC tasks

    Args:
        model_dir : model name
        rule : task to analyze
        params_list : a list of parameter dictionaries used for the psychometric mode
        batch_shape : shape of each batch. Each batch should have shape (n_rep, ...)
        n_rep is the number of repetitions that will be averaged over

    Return:
        ydatas: list of performances
    """
    print('Starting psychometric analysis of the {:s} task...'.format(
        rule_name[rule]))

    model = Model(model_dir)
    hp = model.hp
    with tf.Session() as sess:
        model.restore()

        ydatas = list()
        for params in params_list:

            trial = generate_trials(rule, hp, 'psychometric', params=params)
            feed_dict = tools.gen_feed_dict(model, trial, hp)
            y_loc_sample = sess.run(model.y_hat_loc, feed_dict=feed_dict)
            y_loc_sample = np.reshape(y_loc_sample[-1], batch_shape)

            stim1_locs_ = np.reshape(params['stim1_locs'], batch_shape)
            stim2_locs_ = np.reshape(params['stim2_locs'], batch_shape)

            # Average over the first dimension of each batch
            choose1 = (get_dist(y_loc_sample - stim1_locs_) < THETA).sum(
                axis=0)
            choose2 = (get_dist(y_loc_sample - stim2_locs_) < THETA).sum(
                axis=0)
            ydatas.append(choose1 / (choose1 + choose2))

    return ydatas
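A minimal usage sketch for the function above (hypothetical model directory, rule and values; only the 'stim1_locs'/'stim2_locs' keys are read directly by the code shown, and a real params dict would also carry the task-specific fields expected by generate_trials in psychometric mode):

n_rep, n_cond = 5, 7                 # repetitions x stimulus conditions
batch_shape = (n_rep, n_cond)
params = {
    'stim1_locs': np.zeros(n_rep * n_cond),         # stimulus 1 at 0 rad everywhere
    'stim2_locs': np.pi * np.ones(n_rep * n_cond),  # stimulus 2 on the opposite side
    # ... plus whatever additional fields the psychometric mode requires
}
ydatas = _psychometric_dm('checkpoints/my_model', 'contextdm1', [params], batch_shape)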
Example #21
def activity_histogram(model_dir,
                       rules,
                       title=None,
                       save_name=None):
    """Plot the activity histogram."""

    if isinstance(rules, str):
        rules = [rules]

    h_all = None
    model = Model(model_dir)
    hp = model.hp
    with tf.Session() as sess:
        model.restore()

        t_start = int(500/hp['dt'])

        for rule in rules:
            # Generate a batch of trials in test mode
            trial = generate_trials(rule, hp, mode='test')
            feed_dict = tools.gen_feed_dict(model, trial, hp)
            h = sess.run(model.h, feed_dict=feed_dict)
            h = h[t_start:, :, :]
            if h_all is None:
                h_all = h
            else:
                h_all = np.concatenate((h_all, h), axis=1)

    # var = h_all.var(axis=0).mean(axis=0)
    # ind = var > 1e-2
    # h_plot = h_all[:, :, ind].flatten()
    h_plot = h_all.flatten()

    fig = plt.figure(figsize=(1.5, 1.2))
    ax = fig.add_axes([0.2, 0.2, 0.7, 0.6])
    ax.hist(h_plot, bins=20, density=True)
    ax.set_xlabel('Activity', fontsize=7)
    [ax.spines[s].set_visible(False) for s in ['left', 'top', 'right']]
    ax.set_yticks([])
Example #22
def ini_model_train(opt):
    X_ini, y_ini, X_test, y_test, X_train_All, y_train_All = ini_model(opt)
    mod = Model().to(device)
    optimizer = optim.SGD(mod.parameters(), lr=opt.ini_lr)
    criterion = nn.CrossEntropyLoss()
    num_batches_train = X_ini.shape[0] // opt.ini_batch_size
    mod.train()
    for i in range(opt.ini_epoch):
        loss = 0
        for j in range(num_batches_train):
            slce = get_slice(j, opt.ini_batch_size)
            X_tra = torch.from_numpy(X_ini[slce]).float().to(device)
            Y_tra = torch.from_numpy(y_ini[slce]).long().to(device)
            optimizer.zero_grad()
            out = mod(X_tra)
            batch_loss = criterion(out, Y_tra)
            batch_loss.backward()
            optimizer.step()
            loss += batch_loss
        mod.eval()
        acc = test_without_dropout(X_test, y_test, mod, device)
        print('\n[{}/{} epoch], training loss:{:.4f}, test accuracy is:{} \n'.
              format(i, opt.ini_epoch,
                     loss.item() / num_batches_train, acc))
        if i + 1 == opt.ini_epoch:
            for d in range(opt.num_dev):
                torch.save(
                    {
                        'epoch': i,
                        'model_state_dict': mod.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss.item()
                    },
                    os.path.join(opt.ini_model_path, 'device' + str(d),
                                 "ini.model.pth.tar"))
            torch.save(
                {
                    'epoch': i,
                    'model_state_dict': mod.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss.item()
                }, opt.ini_model_path)
    return X_test, y_test, X_train_All, y_train_All
Example #23
class TestNetwork(unittest.TestCase):
    class RandomData(Dataset):
        def __init__(self, n_images, n_classes, input_transform=None):
            self.n_images = n_images
            self.n_classes = n_classes
            self.input_transform = input_transform

        def __getitem__(self, index):
            images = torch.rand(3, 300, 300)
            labels = random.randint(0, self.n_classes - 1)
            if self.input_transform:
                images = self.input_transform(images)
            return images, labels

        def __len__(self):
            return self.n_images

    def test_network(self):
        input_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((300, 300)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        self.realset = TestNetwork.RandomData(n_images=10,
                                              n_classes=7,
                                              input_transform=input_transform)
        self.model = Model(n_classes=7)
        self.model.eval()
        all_features, all_outputs, all_preds, all_labels = predict(
            self.model, self.realset, batch_size=4, n_classes=7, GPUs=None)
        recall = np.sum(all_preds == all_labels) / float(len(self.realset))
        ap = AP(all_outputs, all_labels)
        mean_ap = meanAP(all_outputs, all_labels)
        self.assertGreaterEqual(mean_ap, 0)
        self.assertLessEqual(mean_ap, 1)
Example #24
def networkx_illustration(model_dir):
    import networkx as nx

    model = Model(model_dir)
    with tf.Session() as sess:
        model.restore()
        # get all connection weights and biases as tensorflow variables
        w_rec = sess.run(model.w_rec)

    w_rec_flat = w_rec.flatten()
    ind_sort = np.argsort(abs(w_rec_flat - np.mean(w_rec_flat)))
    n_show = int(0.01 * len(w_rec_flat))
    ind_gone = ind_sort[:-n_show]
    ind_keep = ind_sort[-n_show:]
    w_rec_flat[ind_gone] = 0
    w_rec2 = np.reshape(w_rec_flat, w_rec.shape)
    w_rec_keep = w_rec_flat[ind_keep]
    G = nx.from_numpy_array(abs(w_rec2), create_using=nx.DiGraph())

    color = w_rec_keep
    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    nx.draw(G,
            linewidths=0,
            width=0.1,
            alpha=1.0,
            edge_vmin=-3,
            edge_vmax=3,
            arrows=False,
            pos=nx.circular_layout(G),
            node_color=np.array([99. / 255] * 3),
            node_size=10,
            edge_color=color,
            edge_cmap=plt.cm.RdBu_r,
            ax=ax)
    plt.savefig('figure/illustration_networkx.pdf', transparent=True)
Example #25
def upload():
    # Get the name of the uploaded file
    file = request.files['file']
    # Check if the file is one of the allowed types/extensions
    if file and allowed_file(file.filename):
        # Make the filename safe, remove unsupported chars
        filename = secure_filename(file.filename)
        # Move the file from the temporary folder to
        # the upload folder we set up
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        print(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        model = Model(os.path.join(app.config['UPLOAD_FOLDER'], filename))

        # Redirect the user to the uploaded_file route, which
        # will basically show the uploaded file in the browser
        return redirect(url_for('uploaded_file',
                                filename=filename))
Example #26
def ini_train(X_ini, y_ini, X_te, y_te, epochs, paths, device, batch_size, lr,
              momentum, arr_drop):
    mod = Model(arr_drop).to(device)
    optimizer = optim.SGD(mod.parameters(), lr=lr, momentum=momentum)
    criterion = nn.CrossEntropyLoss()
    #batch_size = 200
    num_batches_train = X_ini.shape[0] // batch_size
    print("number of batch ", num_batches_train)
    mod.train()
    for i in range(epochs):
        loss = 0
        for j in range(num_batches_train):
            slce = get_slice(j, batch_size)
            X_tra = torch.from_numpy(X_ini[slce]).float().to(device)
            Y_tra = torch.from_numpy(y_ini[slce]).long().to(device)
            optimizer.zero_grad()
            out = mod(X_tra)
            batch_loss = criterion(out, Y_tra)
            batch_loss.backward()
            optimizer.step()
            loss += batch_loss
        mod.eval()
        with torch.no_grad():
            X_va = torch.from_numpy(X_te).float().to(device)
            Y_va = torch.from_numpy(y_te).long().to(device)
            output = mod(X_va)
            preds = torch.max(output, 1)[1]
            acc = accuracy_score(Y_va, preds)
        print('\n[{}/{} epoch], training loss:{:.4f}, test accuracy is:{} \n'.
              format(i, epochs,
                     loss.item() / num_batches_train, acc))
    if i + 1 == epochs:
        for path in paths:
            torch.save(
                {
                    'epoch': i,
                    'model_state_dict': mod.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss.item()
                }, os.path.join(path, "ini.model.pth.tar"))
    return mod
Example #27
    def __init__(self, options):
        """Gonna need a db, and some creds."""
        log.info("Starting AG Chatter Bot.")
        self.options = options
        # Build Constructors
        self.idx2word = Database(
            host=options.redis_host, pass_=options.redis_pass, db=0)
        self.word2idx = Database(
            host=options.redis_host, pass_=options.redis_pass, db=1)
        self.dataReader = DataReader(
            self.options, self.idx2word, self.word2idx)
        self.model = Model(self.options)
        log.debug(options)
        log.info("Init complete.")
Example #28
def model(dataset, model_name=None, device=None, train=True):
    """加载模型"""
    device = device or torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    net = Model(vocab_size=dataset.vocab_size,
                embedding_dim=config.embedding_dim,
                output_size=dataset.target_vocab_size,
                encoder_hidden_size=config.encoder_hidden_size,
                decoder_hidden_size=config.decoder_hidden_size,
                encoder_layers=config.encoder_layers,
                decoder_layers=config.decoder_layers,
                dropout=config.dropout,
                embedding_weights=dataset.vector_weights,
                device=device)
    if model_name:  # If a model name is specified, load the corresponding checkpoint
        pre_trained_state_dict = torch.load(FILE_PATH + config.model_path +
                                            model_name,
                                            map_location=device)
        state_dict = net.state_dict()
        state_dict.update(pre_trained_state_dict)
        net.load_state_dict(state_dict)
    net.train() if train else net.eval()
    return net
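A minimal usage sketch (hypothetical checkpoint name; it assumes a dataset object exposing vocab_size, target_vocab_size and vector_weights as referenced above, and a matching checkpoint file under config.model_path):

# Hypothetical: load a saved checkpoint for evaluation only.
net = model(dataset, model_name='seq2seq_best.pth', train=False)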
Example #29
def multi_test():
    model = Model()
    loss = Loss(model)
    data = Data()
    main = Main(model, loss, data)
    print('start evaluate')
    main.load_model(opt.weight, 0)
    main.model.eval()
    qf = extract_feature(main.model, tqdm(main.query_loader)).numpy()
    rank1 = []
    rank3 = []
    rank5 = []
    rank10 = []
    rank20 = []
    for i in range(10):
        data = Data(i)
        main = Main(model, loss, data)
        print('start evaluate', i)
        main.load_model(opt.weight, 0)
        r1, r3, r5, r10, r20 = main.evaluate_multi_test(qf, opt.save_path + opt.name + '_accr.txt', i)
        rank1.append(r1)
        rank3.append(r3)
        rank5.append(r5)
        rank10.append(r10)
        rank20.append(r20)
    r1 = np.mean(rank1)
    r3 = np.mean(rank3)
    r5 = np.mean(rank5)
    r10 = np.mean(rank10)
    r20 = np.mean(rank20)

    print('[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'
          .format(r1, r3, r5, r10, r20))

    with open(opt.save_path + opt.name + '_accr.txt', 'a') as f:
        f.write(
            '[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'
          .format(r1, r3, r5, r10, r20))
Example #30
def train():
    model = Model()
    model.to("cuda:0")
    Opt = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)
    # checkpoint = torch.load("./model.pth")
    # model.load_state_dict(checkpoint["model"])
    # Opt.load_state_dict(checkpoint["Opt"])
    for i in range(10000):
        Opt.zero_grad()
        imgs, targets = read_batch()
        imgs = torch.tensor(imgs, dtype=torch.float32).to("cuda:0")
        targets = torch.tensor(targets, dtype=torch.float32).to("cuda:0")
        preds = model(imgs)
        loss = make_loss(preds, targets)
        loss.backward()
        Opt.step()
        if i % 10 == 0:
            print("Iteration: %d, Loss: %f" % (i, loss))
            state = {'model': model.state_dict(), 'Opt': Opt.state_dict(), 'itr': i}
            torch.save(state, "./model.pth")