Example #1
def algor_hot(train, valid, test, topN):
    stat = {}
    for uid, items in train:
        for iid, score, ts in items:
            if iid not in stat:
                stat[iid] = [0, 0]

            # score is assumed binary; [0]/[1] count per-score occurrences
            stat[iid][score] += 1

    # rank items by positive-score count, most popular first
    top = sorted(stat.items(), key=lambda x: -x[1][1])
    print('stat over')

    #print top

    def predict(uid, items):
        readset = set(map(lambda x: x[0], items))

        ans = []
        for item in map(lambda x: x[0], top):
            if item in readset:
                continue
            ans.append(item)
            if len(ans) == topN:
                break
        return ans[:topN]

    utils.measure(predict, test)
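Every top-N example in this collection hands its predict function to utils.measure(predict, test), whose implementation is not included. For orientation, here is a minimal sketch of what such a harness could look like; the leave-half-out split, the recall metric, and the exact signature are assumptions, not the real utils module.

# Hypothetical sketch of utils.measure for the top-N examples (assumed
# protocol: hold out half of each test user's history as ground truth).
def measure(predict, test, debug=False):
    hits, total = 0, 0
    for uid, items in test:
        half = len(items) // 2
        history, heldout = items[:half], items[half:]
        if not history or not heldout:
            continue
        recs = predict(uid, history) or []
        truth = set(x[0] for x in heldout)
        hits += len(set(recs) & truth)
        total += len(truth)
        if debug:
            print(uid, recs)
    print('recall@N: %.4f' % (hits / max(total, 1)))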
Example #2
def algor_dnn_predict(train, valid, test, topN):
    movie_id_set = set()
    for uid, items in train:
        for iid, score, ts in items:
            movie_id_set.add(iid)

    model = dnn.FC_DNN(128716, 128)
    model.load_state_dict(torch.load('temp/dnn.pkl'))

    movie_ids = list(movie_id_set)

    def predict(uid, items):
        readset = set(map(lambda x: x[0], items))

        ans = []
        # TODO: score the candidates in movie_ids with the loaded DNN
        # model and rank them. The original loop referenced an undefined
        # `top`; as a runnable stub, fall back to the first unseen ids.
        for item in movie_ids:
            if item in readset:
                continue
            ans.append(item)
            if len(ans) == topN:
                break
        return ans

    utils.measure(predict, test)
Example #3
    def test_one_batch(self, batch, batch_size):
        """Decode one batch with beam search and score the outputs.

        :param batch: one batch of inputs; batch[4] holds the reference
            natural-language sentences
        :param batch_size: number of samples in the batch
        :return: (references, candidates, sentence BLEU, METEOR)
        """
        with torch.no_grad():
            nl_batch = batch[4]

            # outputs: [T, B, H]
            # hidden: [1, B, H]
            code_outputs, ast_outputs, decoder_hidden = \
                self.model(batch, batch_size, self.nl_vocab, is_test=True)

            # decode
            batch_sentences = self.beam_decode(batch_size=batch_size,
                                               code_outputs=code_outputs,
                                               ast_outputs=ast_outputs,
                                               decoder_hidden=decoder_hidden)

            # translate indices into words for the candidates
            candidates = self.translate_indices(batch_sentences)

            # measure
            s_bleu_score, meteor_score = utils.measure(batch_size,
                                                       references=nl_batch,
                                                       candidates=candidates)

            return nl_batch, candidates, s_bleu_score, meteor_score
Example #4
def algor_dssm(train, valid, test, topN):
    index = embedding_dict.EmbeddingDict('temp/dssm_out_emb.txt',
                                         contain_key=False,
                                         metric='angular')
    embeddingBag = nn.EmbeddingBag(131263, 64, mode='mean')
    embeddingBag.load_state_dict(torch.load('temp/dssm.pkl'))

    def predict(uid, items):
        readset = set(map(lambda x: x[0], items))

        # sample a fixed number of the user's items as the query bag
        input_size = 10
        input_nids = [random.choice(items)[0] for _ in range(input_size)]
        input_offset = [0]  # a single bag starting at position 0

        print(input_offset)
        print(input_nids)

        inputs_emb = embeddingBag(torch.tensor(input_nids),
                                  torch.tensor(input_offset))
        input_emb = inputs_emb[0]

        print(input_emb)

        ans, dis = index.index.get_nns_by_vector(input_emb,
                                                 n=100,
                                                 include_distances=True)
        ret = []
        for item, score in zip(ans, dis):
            if item in readset:
                continue

            ret.append(str(item))
            if len(ret) >= topN:
                break
        return ret

    utils.measure(predict, test, debug=True)
Example #5
def plot_ekf(filename):
    t_tot = 16
    ts = 0.01
    dt = 0.001
    L = int(ts / dt)
    mu_0 = 1
    sigma_0 = math.sqrt(0.001)
    sigma_u = math.sqrt(0.01)
    sigma_m = math.sqrt(1)
    a, r, b = 10, 28, 8 / 3
    Gamma = np.eye(3)

    xs, ys, zs = simulate(t_tot, mu_0, sigma_0, a, r, b, dt, sigma_u, Gamma)
    xs_m = measure(xs, L, sigma_m)

    mu, cov = ekf(a, r, b, dt, sigma_u, Gamma, mu_0, sigma_0, ts, t_tot, xs_m,
                  sigma_m)

    print(cov[int(5 / ts)][0, 0])

    plot_trajectory(L, t_tot, dt, xs, xs_m, mu[:, 0], 'x', filename[0])
    plot_trajectory(L, t_tot, dt, ys, None, mu[:, 1], 'y', filename[1])
    plot_trajectory(L, t_tot, dt, zs, None, mu[:, 2], 'z', filename[2])

    # Error function
    fig, ax = plt.subplots()
    x_real = np.empty((int(t_tot / ts) + 1, 3))
    x_real[:, 0] = xs[::L]
    x_real[:, 1] = ys[::L]
    x_real[:, 2] = zs[::L]

    a = np.arange(0, int(t_tot / dt) + 1, 1)
    err = np.linalg.norm(x_real - mu, axis=1)
    plt.plot(a[::L], err, 'b', label="Global error")
    plt.axhline(np.mean(err),
                color='b',
                linestyle='dashed',
                label="Mean global error")
    err_x = np.abs(x_real[:, 0] - mu[:, 0])
    plt.axhline(np.mean(err_x),
                color='g',
                linestyle='dashed',
                label="Mean error on x")
    plt.ylim(0, 6)

    legend = ax.legend(loc='upper right')
    for label in legend.get_texts():
        label.set_fontsize('large')

    for label in legend.get_lines():
        label.set_linewidth(1.5)

    if filename[3] is not None:
        fig.savefig(PATH + filename[3], bbox_inches='tight', pad_inches=0)

    plt.show()
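The simulate, ekf, and plot_* helpers come from the surrounding module and are not shown, but the measure call is simple enough to sketch: it subsamples the fine-grained trajectory every L steps and corrupts it with Gaussian observation noise of standard deviation sigma_m. The exact slicing is an assumption inferred from how xs_m is plotted.

# Hypothetical sketch of measure(xs, L, sigma_m) used by the filtering
# examples: observe every L-th state with additive Gaussian noise.
import numpy as np

def measure(xs, L, sigma_m):
    sub = np.asarray(xs)[::L]
    return sub + np.random.normal(0.0, sigma_m, size=sub.shape)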
Example #6
def algor_item2vec(train, valid, test, topN):
    import embedding_dict
    index = embedding_dict.ItemIndex('temp/word2vec.output.txt', 500)

    def predict(uid, items):
        readset = set(map(lambda x: x[0], items))

        stat_dict = {}
        search_error = 0
        for idx, score, ts in items:
            if score != 1:
                continue
            idx = int(idx)
            try:
                ans, dis = index.index.get_nns_by_item(idx,
                                                       n=300,
                                                       include_distances=True)
                #print idx, ans
                #print dis

                for item, sim in zip(ans, dis):
                    if item == idx:
                        continue
                    stat_dict[item] = stat_dict.get(item, 0) + sim

            except Exception:
                search_error += 1
                continue

        ans = sorted(stat_dict.items(), key=lambda x: -x[1])
        ret = []
        for item, score in ans:
            if item in readset:
                continue
            ret.append(str(item))
            if len(ret) >= topN:
                return ret
        return ret

    utils.measure(predict, test, debug=False)
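embedding_dict.ItemIndex is an external helper, but get_nns_by_item matches the Annoy API, so the index is presumably an Annoy index built over the word2vec output (the 500 in the constructor is likely the vector dimension). A hedged sketch of building such an index from a word2vec-style text file:

# Hypothetical sketch of the ItemIndex construction, assuming Annoy and
# the word2vec text format: each line is 'item_id v1 v2 ... v_dim'.
from annoy import AnnoyIndex

def build_item_index(path, dim, n_trees=10):
    index = AnnoyIndex(dim, 'angular')
    with open(path) as f:
        for line in f:
            fields = line.strip().split()
            if len(fields) != dim + 1:
                continue  # skip the '<count> <dim>' header line
            index.add_item(int(fields[0]), [float(v) for v in fields[1:]])
    index.build(n_trees)
    return index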
Example #7
def algor_cooc(train, valid, test, topN, only1=False):
    # using dict built by build_cooc.py
    fd = open('temp/cooc.txt')

    cooc_dict = {}
    for key, items in pydev.foreach_row(fd):
        items = [(iid, int(cnt))
                 for iid, cnt in (t.split(':') for t in items.split(','))]
        cooc_dict[key] = items
    print('cooc load over', file=sys.stderr)

    def predict(uid, items):
        local_stat = {}
        readset = set(map(lambda x: x[0], items))

        for item, score, _ in items:
            if only1 and score != 1:
                continue
            cooc_items = cooc_dict.get(item, [])
            for c_item, c_count in cooc_items:
                if c_item in readset:
                    continue
                local_stat[c_item] = local_stat.get(c_item, 0) + c_count

        ans = [x[0] for x in
               sorted(local_stat.items(), key=lambda x: -x[1])[:topN]]
        '''
        print('items:')
        print(items)
        print('local:')
        print(sorted(local_stat.items(), key=lambda x: -x[1])[:20])
        print('ans:')
        print(ans)
        '''

        return ans

    utils.measure(predict, test, debug=False)
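The loader above expects temp/cooc.txt in the format key<TAB>item:count,item:count,..., produced by a build_cooc.py that is not shown. Here is a minimal sketch of a builder that would emit that format from the same train structure; counting pairs within each user's history is an assumption.

# Hypothetical sketch of build_cooc.py: count co-occurrences of item
# pairs inside each user's history and write the format parsed above.
def build_cooc(train, path='temp/cooc.txt'):
    cooc = {}
    for uid, items in train:
        iids = [x[0] for x in items]
        for a in iids:
            for b in iids:
                if a != b:
                    cooc.setdefault(a, {})
                    cooc[a][b] = cooc[a].get(b, 0) + 1
    with open(path, 'w') as f:
        for key, counts in cooc.items():
            line = ','.join('%s:%d' % kv for kv in counts.items())
            f.write('%s\t%s\n' % (key, line))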
Example #8
def algor_naive_usercf(train, valid, test, topN):
    index = {}
    readlist = {}
    for uid, items in train:
        rl = [x[0] for x in items if x[1] == 1]

        readlist[uid] = rl
        for iid in rl:
            if iid not in index:
                index[iid] = []
            index[iid].append(uid)

    print('index build ok', file=sys.stderr)
    
    def predict(uid, items):
        readset = set(map(lambda x:x[0], items))

        sim_users = {}
        for iid, score, ts in items:
            rlist = index.get(iid, [])
            for user in rlist:
                sim_users[user] = sim_users.get(user, 0) + 1

        sim_users_list = sorted(sim_users.items(), key=lambda x: -x[1])[:20]
        #print sim_users_list

        s_set = {}
        for sim_user, sim_count in sim_users_list:
            for item in readlist[sim_user]:
                if item in readset:
                    continue
                s_set[item] = s_set.get(item, 0) + 1
        ret = [x[0] for x in sorted(s_set.items(), key=lambda x: -x[1])[:topN]]
        #print ret
        return ret

    utils.measure(predict, test, debug=False)
Example #9
    def calculate(self):

        result = utils.measure()

        # download speed in Mbps
        self.down_shiped = result['download'] / 1000000

        message_box_str = (
            " - Download speed delivered: %.2f Mbps \n"
            " - Download speed contracted: %.2f Mbps \n"
            " - The provider is delivering %.2f%% of what was contracted. \n\n"
            " Do you want to complain on the provider's Instagram? " % (
                self.down_shiped, self.down_contract_var.get(),
                (100 * self.down_shiped) / self.down_contract_var.get()))
        answer = messagebox.askyesno("Results - <Provider>",
                                     message_box_str)

        if answer:  # the user wants to post the complaint on Instagram
            # play the sound in a background thread so it runs in
            # parallel with the complaint
            x = threading.Thread(target=playsound, args=('taca.mp3', ))
            x.start()
            self.complain(result)
Example #10
def plot_mes_vs_real(export=False, filename='q2-mes-vs-real.pdf'):
    """Compare noisy measurements of the first coordinate of the particle
    positions with the actual simulated first coordinate.

    Arguments:
    export -- indicate if the plot should be PDF exported and saved (default
    False)
    filename -- exported PDF filename (default q2-mes-vs-real.pdf)
    """
    t_tot = 50
    dt = 0.001
    ts = 0.01
    L = int(ts / dt)
    xs, ys, zs = simulate(a=10,
                          r=28,
                          b=8 / 3,
                          mu_0=(1, 1, 1),
                          sigma_0=math.sqrt(0.001),
                          dt=dt,
                          sigma_u=math.sqrt(0.0000001),
                          Gamma=np.eye(3),
                          t_tot=t_tot)
    xs_m = measure(xs, L=L, sigma_m=1)

    fig, ax = plt.subplots()
    a = np.arange(0, int(t_tot / dt) + 1, 1)
    ax.plot(a, xs, 'b', label='First coordinate trajectory')
    ax.plot(a[:-1:L], xs_m, 'g.', label='Noisy measurements', markersize=4.0)
    legend = ax.legend(loc='upper right')

    for label in legend.get_texts():
        label.set_fontsize('large')

    for label in legend.get_lines():
        label.set_linewidth(1.5)

    plt.show()

    if export:
        fig.savefig(PATH + filename, bbox_inches='tight', pad_inches=0)
Example #11
def main():
    L = int(ts / dt)
    xs, ys, zs = simulate(
        t_tot,
        mu_0,
        sigma_0,
        a,
        r,
        b,
        dt,
        sigma_u,
        Gamma,
    )
    xs_m = measure(xs, L, sigma_m)

    distribs_ekf = ekf_distribs(xs_m)
    distribs_csmc = csmc_distribs(xs_m)

    for k in range(3):
        C_ekf = np.sort(distribs_ekf[k])
        C_csmc = np.sort(distribs_csmc[k])
        R = np.arange(n) / float(n)

        fig, ax = plt.subplots()
        ax.plot(C_ekf, R, label="{} distrib. from EKF".format(dimensions[k]))
        ax.plot(C_csmc, R, label="{} distrib. from CSMC".format(dimensions[k]))

        legend = ax.legend(loc='upper right')

        for label in legend.get_texts():
            label.set_fontsize('large')

        for label in legend.get_lines():
            label.set_linewidth(1.5)

        #fig.savefig(PATH + "distrib-{}.pdf".format(dimensions[k]),
        #            bbox_inches='tight', pad_inches=0)

        plt.show()
Example #12
    def nid_ctr(self):
        stat = {}
        global_disp = 0
        global_click = 0
        for uid, iid, click in self.train:
            if iid not in stat:
                stat[iid] = [0, 0]

            stat[iid][0] += 1
            global_disp += 1
            if click:
                global_click += 1
                stat[iid][1] += 1

        # scaled-down global counts, used as pseudo-count priors
        global_click_ratio = global_click * 0.00001
        global_disp_ratio = global_disp * 0.00001

        def predict(uid, iid, debug_fd, smooth):
            s = stat.get(iid, [0, 0])
            if debug_fd:
                print('stat\t%s\t%d\t%d' % (iid, s[0], s[1]), file=debug_fd)
            if smooth == 0:
                return s[1] * 1. / (s[0] + 0.1)
            elif smooth == 1:
                return (s[1] + 1.) / (s[0] + 10.)
            elif smooth == 2:
                return (s[1] + global_click_ratio) / (s[0] + global_disp_ratio)

        predict_none_smooth = lambda u, i, d: predict(u, i, d, smooth=0)
        predict_static_smooth = lambda u, i, d: predict(u, i, d, smooth=1)
        predict_ratio_smooth = lambda u, i, d: predict(u, i, d, smooth=2)

        pydev.info('nid_ctr with none smooth')
        utils.measure(predict_none_smooth, self.test, debug=self.debug)
        pydev.info('nid_ctr with static smooth')
        utils.measure(predict_static_smooth, self.test, debug=self.debug)
        pydev.info('nid_ctr with ratio smooth')
        utils.measure(predict_ratio_smooth, self.test, debug=self.debug)
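The three smoothers differ only in the prior pseudo-counts added to an item's raw click/display counts, which matters most for sparse items. A quick numeric illustration, with invented global totals:

# Worked example of the three smoothing variants for a sparse item
# with 1 click in 3 displays; the global totals below are made up.
clicks, disps = 1, 3
global_click_ratio = 200000 * 0.00001   # assumed 200k global clicks -> 2.0
global_disp_ratio = 5000000 * 0.00001   # assumed 5M global displays -> 50.0

print(clicks * 1. / (disps + 0.1))      # none:   ~0.3226
print((clicks + 1.) / (disps + 10.))    # static: ~0.1538
print((clicks + global_click_ratio)
      / (disps + global_disp_ratio))    # ratio:  ~0.0566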
Example #13
def fibonacci_by_recurse_measure():
    return utils.measure(fibonacci)
Example #14
    def test_iter(self):
        """
        evaluate model on self.dataset
        :return: scores
        """
        total_references = []
        total_candidates = []

        out_file = None
        if config.save_test_outputs:
            try:
                out_file = open(os.path.join(config.output_root, 'test_outputs.txt'), encoding='utf-8', mode='w')
            except IOError:
                logger.error('Test details file create failed')

        with torch.no_grad():

            sample_id = 0

            p_bar = tqdm(self.dataloader, desc='[Testing...]')
            for index_batch, batch in enumerate(p_bar):

                batch_size = batch.batch_size
                references = batch.nl_batch

                # outputs: [T, B, H]
                # hidden: [1, B, H]
                source_outputs, code_outputs, ast_outputs, decoder_hidden = \
                    self.model(batch, batch_size, self.nl_vocab, is_test=True)

                extend_source_batch = None
                extra_zeros = None
                if config.use_pointer_gen:
                    extend_source_batch, _, extra_zeros = batch.get_pointer_gen_input()

                # decode
                batch_sentences = self.beam_decode(batch_size=batch_size,
                                                   source_outputs=source_outputs,
                                                   code_outputs=code_outputs,
                                                   ast_outputs=ast_outputs,
                                                   decoder_hidden=decoder_hidden,
                                                   extend_source_batch=extend_source_batch,
                                                   extra_zeros=extra_zeros)

                # translate indices into words for the candidates
                candidates = self.translate_indices(batch_sentences, batch.batch_oovs)

                total_references += references
                total_candidates += candidates

                if out_file:
                    for index in range(len(candidates)):
                        out_file.write('Sample {}:\n'.format(sample_id))
                        out_file.write(' '.join(['Reference:'] + references[index]) + '\n')
                        out_file.write(' '.join(['Candidate:'] + candidates[index]) + '\n')
                        out_file.write('\n')
                        sample_id += 1

            # measure
            s_bleu_score, meteor_score = utils.measure(references=total_references, candidates=total_candidates)
            c_bleu = utils.corpus_bleu_score(references=total_references, candidates=total_candidates)

            avg_scores = {'c_bleu': c_bleu, 's_bleu': s_bleu_score, 'meteor': meteor_score}

            if out_file:
                for name, score in avg_scores.items():
                    out_file.write(name + ': ' + str(score) + '\n')
                out_file.flush()
                out_file.close()

        return avg_scores
Example #15
    Report += res["report"]
    Report += "    Training MSE: " + str(res["train_mse"]) + "\n"
    Report += "    Testing MSE: " + str(res["test_mse"]) + "\n"
    # Results saving
    BICs.append(res["BICs"])
    SUPPs.append(res["support"])
    TRUEs.append(res["true"])
    ERR_train.append(res["train_mse"])
    ERR_test.append(res["test_mse"])

############################################
### Metric calculation and result saving ###
############################################

BICs = np.array(BICs)
DIR_res = "../outputs/reports/"

fsr, nsr = measure(TRUEs, SUPPs)
final_report = "For " + str(K) + " datasets:\n"
final_report += "  False Selection Rate: " + str(fsr) + "\n"
final_report += "  Negative Selection Rate: " + str(nsr) + "\n"
final_report += "  Training error: " + str(np.mean(ERR_train)) + "(" + str(
    np.std(ERR_train)) + ")\n"
final_report += "  Testing error: " + str(np.mean(ERR_test)) + "(" + str(
    np.std(ERR_test)) + ")\n"

Report = final_report + Report
report_file = open(DIR_res + "DFS_linear.txt", "a")
report_file.write(Report)
report_file.close()
Example #16
def main():
    assert torch.cuda.is_available()
    model_fname = 'logs_data_official'
    model_fname += ('_vgg' if 'vgg' in args.backbone else '_resnet') 
    model_fname += ('_pascal' if 'pascal' in args.dataset else '_coco')
    if not os.path.isdir(model_fname):
        os.mkdir(model_fname)
        
    model_fname += '/deeplab_{0}_{1}_{5}_{3}_{4}_v3_{2}_model_{6}'.format(
        args.backbone, args.dataset, args.exp, args.group, args.num_folds, args.output_stride, args.model)

    if not os.path.isdir(model_fname):
        os.mkdir(model_fname)

    if args.dataset == 'pascal':
        dataset = VOCSegmentationRandom('datasets/VOCdevkit',
                                           train=args.train, crop_size=args.crop_size,
                                           group=args.group, num_folds=args.num_folds,
                                           batch_size=args.batch_size, num_shots=args.num_shots,
                                           iteration=args.iteration)
    elif args.dataset == 'coco':
        dataset = COCO('datasets/coco2017/',
                       train=args.train, crop_size=args.crop_size,
                       group=args.group, num_folds=args.num_folds,
                       batch_size=args.batch_size, num_shots=args.num_shots,
                       iteration=args.iteration)
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    if args.backbone == 'resnet101':
        model = getattr(few_shot_model, 'resnet101')(
            pretrained=(not args.scratch),
            num_groups=args.groups,
            beta=args.beta,
            os=args.output_stride,
            model=args.model)
    elif args.backbone == 'vgg16':
        model = getattr(few_shot_model, 'vgg16')(
            pretrained=(not args.scratch),
            model=args.model)
    else:
        raise ValueError('Unknown backbone: {}'.format(args.backbone))

    if args.dataset == 'pascal':
        if args.group == 'all':
            ref_imgs, query_imgs, query_labels, ref_labels, list_labels = [], [], [], [], []
            
            for i in range(args.num_folds):
                val_file = 'data/val_{}_{}_{}_new.pkl'.format(args.dataset, i, args.num_folds)
                temp_data = torch.load(val_file)
                ref_imgs.extend(temp_data[0][:250])
                ref_labels.extend(temp_data[1][:250])
                query_imgs.extend(temp_data[2][:250])
                query_labels.extend(temp_data[3][:250])
                list_labels.extend(temp_data[4][:250])
                
        else:  
            datalayer = SSDatalayer(args.group, args.num_shots)  
            val_file = 'data/val_{}_{}_{}{}_new.pkl'.format(
                args.dataset, args.group, args.num_folds, '' if args.num_shots == 1 else '_5shot')
            print(val_file)

            if args.num_shots == 1:
                if not os.path.isfile(val_file):
                    ref_imgs, query_imgs, query_labels, ref_labels, list_labels = [], [], [], [], []
                    
                    while True:

                        data = datalayer.dequeue()

                        dat = data[0]

                        semantic_label = dat['deploy_info']['second_semantic_labels'][0]
                        list_labels.append(semantic_label)

                        if args.num_shots == 1:
                            ref_img = dat['second_img'][0]
                            ref_label = dat['first_label'][0]
                            query_img = dat['first_img'][0]
                            query_label = dat['second_label'][0]

                            if query_label.sum() < 1000:
                                continue

                            ref_img, ref_label = torch.Tensor(ref_img), torch.Tensor(ref_label)
                            query_img, query_label = torch.Tensor(query_img), torch.Tensor(query_label)

                            ref_img = torch.unsqueeze(ref_img, dim=0)
                            query_img = torch.unsqueeze(query_img, dim=0)

                            ref_imgs.append(ref_img)
                            ref_labels.append(ref_label.long())
                            query_imgs.append(query_img)
                            query_labels.append(query_label.long())

                        if len(list_labels) >= 1000:
                            break

                    torch.save((ref_imgs, ref_labels, query_imgs,
                                query_labels, list_labels), val_file)

                else:
                    ref_imgs, ref_labels, query_imgs, query_labels, list_labels = torch.load(
                        val_file)
            
            else: # 5 shot:
                val_dataset = VOCSegmentationRandom('datasets/VOCdevkit', train=False, group=args.group, 
                                num_folds=args.num_folds, batch_size=args.batch_size, 
                                num_shots=args.num_shots, iteration=args.iteration,
                                crop_size=None
                                )

    elif args.dataset == 'coco':
        val_dataset = COCO('datasets/coco2017/',
                            train=False, crop_size=args.crop_size,
                            group=args.group, num_folds=args.num_folds,
                            batch_size=args.batch_size, num_shots=args.num_shots,
                            iteration=args.iteration)

    if args.train:
        writer = SummaryWriter('logs/{}'.format(model_fname))
        criterion = nn.CrossEntropyLoss(ignore_index=255)
        model = nn.DataParallel(model).cuda()
        if args.freeze_bn:
            for m in model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False

        optimizer = optim.SGD(model.module.parameters(
        ), lr=args.base_lr, momentum=0.9, weight_decay=0.0001)

        dataset_loader = torch.utils.data.DataLoader(
            dataset, batch_size=args.batch_size, shuffle=args.train,
            pin_memory=True, num_workers=args.workers, drop_last=True)
        max_iter = len(dataset_loader)
        losses = AverageMeter()

        best_loss = 1e16
        best_iou = 0

        from time import time

        for i, (inputs_q, targets_q, inputs_s, targets_s, label) in enumerate(dataset_loader):
            lr = args.base_lr * (1 - float(i) / max_iter) ** 0.9
            optimizer.param_groups[0]['lr'] = lr

            inputs_q = inputs_q.cuda()
            targets_q = targets_q.cuda()
            inputs_s = inputs_s.cuda()
            targets_s = targets_s.cuda()
            label = label.cuda()

            attentions, outputs, outputs2, outputs3 = model(x=[inputs_q, targets_q, inputs_s, targets_s],
                                                            training=True, step=args.step)

            loss = criterion(outputs, targets_q)

            losses.update(loss.item(), args.batch_size)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            result_str = ('iter: {0}/{1}\t'
                            'lr: {2:.6f}\t'
                            'loss: {loss.val:.4f} ({loss.ema:.4f})\t'
                            .format(i+1, len(dataset_loader), lr, loss=losses))

            print(result_str)

            writer.add_scalar('training/loss', losses.ema, i)

            if (i + 1) % args.val_interval == 0:
                model.eval()
                with torch.no_grad():
                    val_losses = AverageMeter()

                    num_classes = 20 if args.dataset == 'pascal' else 80
                    tp_list = [0]*num_classes
                    fp_list = [0]*num_classes
                    fn_list = [0]*num_classes
                    iou_list = [0]*num_classes
                    
                    with torch.no_grad():
                        for k in tqdm(range(1000)):
                            if args.dataset == 'pascal':
                                ref_img, ref_label, query_img, query_label, label = ref_imgs[k], \
                                    ref_labels[k], query_imgs[k], query_labels[k], list_labels[k]
                            elif args.dataset == 'coco':
                                query_img, query_label, ref_img, ref_label, label = val_dataset[
                                    k]
                                query_img = query_img.unsqueeze(0)
                                ref_img = ref_img.unsqueeze(0)
                                query_label = query_label.unsqueeze(0)
                                ref_label = ref_label.unsqueeze(0)

                            ref_img, ref_label, query_img, query_label = ref_img.cuda(
                            ), ref_label.cuda(), query_img.cuda(), query_label.cuda()

                            attention, output, output2, _ = model(
                                x=[query_img, query_label, ref_img, ref_label], training=False, step=10)

                            # compute the loss:
                            loss = criterion(output, query_label)
                            val_losses.update(
                                loss.item(), args.batch_size)

                            output = output.argmax(1)

                            pred = output.data.cpu().numpy().astype(np.int32)
                            query_label = query_label.cpu().numpy().astype(np.int32)
                            tp, tn, fp, fn = measure(query_label, pred)

                            if args.dataset == 'pascal':
                                tp_list[label-1] += tp
                                fp_list[label-1] += fp
                                fn_list[label-1] += fn
                            else:
                                tp_list[label] += tp
                                fp_list[label] += fp
                                fn_list[label] += fn

                        iou_list = [
                            tp_list[ic] / float(max(tp_list[ic] + fp_list[ic] + fn_list[ic], 1)) for ic in range(num_classes)]

                        if args.group == 'all':
                            class_indexes = list(range(20))
                        else:
                            class_indexes = list(range(args.group*5, (args.group+1)*5))
                        mIoU = np.mean(np.take(iou_list, class_indexes))
                        print('mIoU:', mIoU)

                    writer.add_scalar('val/loss', val_losses.ema, i)
                    writer.add_scalar('val/mIoU', mIoU, i)

                model.train()  # very important

                if best_loss > val_losses.ema:
                    best_loss = val_losses.ema
                    torch.save({
                        'iteration': i + 1,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                    }, model_fname + '/best_loss.pth')

                if best_iou < mIoU:
                    best_iou = mIoU
                    torch.save({
                        'iteration': i + 1,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                    }, model_fname + '/best_iou.pth')

                torch.save({
                    'iteration': i + 1,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_fname + '/current.pth')

    else:
        # TODO Complete the evaluation with new dataset

        torch.cuda.set_device(args.gpu)
        model = model.cuda()
        model.eval()

        mapping_names = [0, 1, 2, 0, 4, 2, 1, 4, 2, 2]

        checkpoint = torch.load(model_fname[:-1] + str(mapping_names[args.model]) + '/best_iou.pth')
        state_dict = {
            k[7:]: v for k, v in checkpoint['state_dict'].items() if 'tracked' not in k}
        model.load_state_dict(state_dict, strict=False)

        for param in model.parameters():
            param.requires_grad = False

        print(model_fname + '/best_iou.pth')

        num_classes = 20 if args.dataset == 'pascal' else 80
        tp_list = [0]*num_classes
        fp_list = [0]*num_classes
        fn_list = [0]*num_classes
        iou_list = [0]*num_classes

        alpha = 0.8

        with torch.no_grad():
            for k in tqdm(range(1000)):
                if args.num_shots == 1:
                    if args.dataset == 'pascal':
                        ref_img, ref_label, query_img, query_label, label = ref_imgs[k], \
                            ref_labels[k], query_imgs[k], query_labels[k], list_labels[k]

                    elif args.dataset == 'coco':
                        query_img, query_label, ref_img, ref_label, label = val_dataset[k]

                        query_img = query_img.unsqueeze(0)
                        ref_img = ref_img.unsqueeze(0)
                        query_label = query_label.unsqueeze(0)
                        ref_label = ref_label.unsqueeze(0)

                    ref_img, ref_label, query_img, query_label = ref_img.cuda(), \
                        ref_label.cuda(), query_img.cuda(), query_label.cuda()
                else:
                    query_img, query_label, ref_img, ref_label, label = val_dataset[k]

                    query_img = query_img.unsqueeze(0).cuda()
                    query_label = query_label.unsqueeze(0).cuda()
                    
                    ref_img = [x.unsqueeze(0).cuda() for x in ref_img]
                    ref_label = [x.unsqueeze(0).cuda() for x in ref_label]

                attention, output, output2, _ = model(
                    x=[query_img, query_label, ref_img, ref_label], training=False, step=10)

                if args.model in [0, 1, 2, 4, 8]:
                    output = output.argmax(1)
                else:
                    output = output2 > alpha

                pred = output.data.cpu().numpy().astype(np.int32)
                query_label = query_label.cpu().numpy().astype(np.int32)
                tp, tn, fp, fn = measure(query_label, pred)

                if args.dataset == 'pascal':
                    tp_list[label-1] += tp
                    fp_list[label-1] += fp
                    fn_list[label-1] += fn
                else:
                    tp_list[label] += tp
                    fp_list[label] += fp
                    fn_list[label] += fn

            iou_list = [
                tp_list[ic] / float(max(tp_list[ic] + fp_list[ic] + fn_list[ic], 1)) for ic in range(num_classes)]

            print(iou_list)
            class_indexes = list(range(args.group*5, (args.group+1)*5))
            mIoU = np.mean(np.take(iou_list, class_indexes))
            print('mIoU:', mIoU)
Example #17
def fibonacci_by_memoize_measure():
    return utils.measure(fibonacci_by_cache())
Example #18
def fibonacci_by_cache_measure():
    return utils.measure(fibonacci_bottom_up())
Example #19
def main():
    args = load_args()
    data = load_csv_pandas('source.csv')

    # CSV pandas
    save_csv_pandas(data, test=False)

    time, rss = measure(args.cycles, save_csv_pandas, data)
    print('save_csv_pandas', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_csv_pandas)
    print('load_csv_pandas', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_csv_pandas_and_validate)
    print('load_csv_pandas_and_validate', format_time(time), format_rss(rss))

    # CSV default
    save_csv(data, test=False)

    time, rss = measure(args.cycles, save_csv, data)
    print('save_csv', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_csv)
    print('load_csv', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_csv_and_validate)
    print('load_csv_and_validate', format_time(time), format_rss(rss))

    # CSV quote_nonnumeric
    save_csv_quote_nonnumeric(data, test=False)

    time, rss = measure(args.cycles, save_csv_quote_nonnumeric, data)
    print('save_csv_quote_nonnumeric', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_csv_quote_nonnumeric)
    print('load_csv_quote_nonnumeric', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_csv_quote_nonnumeric_and_validate)
    print('load_csv_quote_nonnumeric_and_validate', format_time(time),
          format_rss(rss))

    # JSON default
    save_json(data, test=False)

    time, rss = measure(args.cycles, save_json, data)
    print('save_json', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_json)
    print('load_json', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_json_and_validate)
    print('load_json_and_validate', format_time(time), format_rss(rss))

    # uJSON default
    save_ujson(data, test=False)

    time, rss = measure(args.cycles, save_ujson, data)
    print('save_ujson', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_ujson)
    print('load_ujson', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_ujson_and_validate)
    print('load_ujson_and_validate', format_time(time), format_rss(rss))

    # JSON lines
    save_json_lines(data, test=False)

    time, rss = measure(args.cycles, save_json_lines, data)
    print('save_json_lines', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_json_lines)
    print('load_json_lines', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_json_lines_and_validate)
    print('load_json_lines_and_validate', format_time(time), format_rss(rss))

    # uJSON lines
    save_ujson_lines(data, test=False)

    time, rss = measure(args.cycles, save_ujson_lines, data)
    print('save_ujson_lines', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_ujson_lines)
    print('load_ujson_lines', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_ujson_lines_and_validate)
    print('load_ujson_lines_and_validate', format_time(time), format_rss(rss))

    # msgpack
    save_msgpack(data, test=False)

    time, rss = measure(args.cycles, save_msgpack, data)
    print('save_msgpack', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_msgpack)
    print('load_msgpack', format_time(time), format_rss(rss))

    # msgpack utf
    save_msgpack_utf(data, test=False)

    time, rss = measure(args.cycles, save_msgpack_utf, data)
    print('save_msgpack_utf', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_msgpack_utf)
    print('load_msgpack_utf', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_msgpack_utf_and_validate)
    print('load_msgpack_utf_and_validate', format_time(time), format_rss(rss))

    # msgpack utf stream
    save_msgpack_utf_stream(data, test=False)

    time, rss = measure(args.cycles, save_msgpack_utf_stream, data)
    print('save_msgpack_utf_stream', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_msgpack_utf_stream)
    print('load_msgpack_utf_stream', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_msgpack_utf_stream_and_validate)
    print('load_msgpack_utf_stream_and_validate', format_time(time),
          format_rss(rss))

    # umsgpack
    save_umsgpack(data, test=False)

    time, rss = measure(args.cycles, save_umsgpack, data)
    print('save_umsgpack', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_umsgpack)
    print('load_umsgpack', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_umsgpack_and_validate)
    print('load_umsgpack_and_validate', format_time(time), format_rss(rss))

    # avro
    save_avro(data, test=False)

    time, rss = measure(args.cycles, save_avro, data)
    print('save_avro', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_avro)
    print('load_avro', format_time(time), format_rss(rss))

    # avro fast
    save_avro_fast(data, test=False)

    time, rss = measure(args.cycles, save_avro_fast, data)
    print('save_avro_fast', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_avro_fast)
    print('load_avro_fast', format_time(time), format_rss(rss))

    # protobuf
    save_protobuf(data, test=False)

    time, rss = measure(args.cycles, save_protobuf, data)
    print('save_protobuf', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_protobuf)
    print('load_protobuf', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_protobuf_to_dicts)
    print('load_protobuf_to_dicts', format_time(time), format_rss(rss))

    # capnp
    save_capnp(data, test=False)
    save_capnp_packed(data, test=False)

    time, rss = measure(args.cycles, save_capnp, data)
    print('save_capnp', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, save_capnp_packed, data)
    print('save_capnp_packed', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_capnp)
    print('load_capnp', format_time(time), format_rss(rss))

    time, rss = measure(args.cycles, load_capnp_to_dicts)
    print('load_capnp_to_dicts', format_time(time), format_rss(rss))
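In this benchmark, measure has a different signature from the earlier examples: measure(cycles, fn, *args) returning a (time, rss) pair. A minimal sketch, assuming it averages wall-clock time over the cycles and reads peak RSS from the Unix-only resource module:

# Hypothetical sketch of measure(cycles, fn, *args) -> (seconds, rss).
import resource
import time

def measure(cycles, fn, *args):
    start = time.perf_counter()
    for _ in range(cycles):
        fn(*args)
    elapsed = (time.perf_counter() - start) / cycles
    rss_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return elapsed, rss_kb * 1024  # ru_maxrss is in KB on Linux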
Example #20
def plot_smc(a, r, b, dt, ts, t_tot, mu_0, sigma_0, sigma_u, Gamma, sigma_m, n,
             filename):

    L = int(ts / dt)

    xs, ys, zs = simulate(
        t_tot,
        mu_0,
        sigma_0,
        a,
        r,
        b,
        dt,
        sigma_u,
        Gamma,
    )
    xs_m = measure(xs, L, sigma_m)

    x_tilde, y_tilde, z_tilde, x, y, z, wxs = classical_smc(
        a, r, b, dt, sigma_u, Gamma, mu_0, sigma_0, ts, t_tot, xs_m, sigma_m,
        n)

    # Histograms
    plot_hist(x, x_tilde, xs, dt, ts, filename[0])
    plot_hist(y, y_tilde, ys, dt, ts, filename[1])

    # Error function
    fig, ax = plt.subplots()
    x_real = np.empty((int(t_tot / ts) + 1, 3))
    x_real[:, 0] = xs[::L]
    x_real[:, 1] = ys[::L]
    x_real[:, 2] = zs[::L]

    a = np.arange(0, int(t_tot / dt) + 1, 1)
    err = np.linalg.norm(x_real - wxs, axis=1)
    plt.plot(a[::L], err, 'b', label="Global error")
    plt.axhline(np.mean(err),
                color='b',
                linestyle='dashed',
                label="Mean global error")
    err_x = np.abs(x_real[:, 0] - wxs[:, 0])
    plt.axhline(np.mean(err_x),
                color='g',
                linestyle='dashed',
                label="Mean error on x")
    #plt.ylim(0, 6)

    legend = ax.legend(loc='upper right')
    for label in legend.get_texts():
        label.set_fontsize('large')

    for label in legend.get_lines():
        label.set_linewidth(1.5)

    if filename[2] is not None:
        fig.savefig(PATH + filename[2], bbox_inches='tight', pad_inches=0)

    plt.close(fig)

    # Particles
    plot_particles(x_tilde, y_tilde, z_tilde, x, y, z, xs_m, wxs, 5, ts,
                   filename[3])
    plot_particles(x_tilde, y_tilde, z_tilde, x, y, z, xs_m, wxs, 15, ts,
                   filename[4])

    # Trajectory of first coordinates
    plot_trajectory(L, t_tot, dt, xs, xs_m, wxs[:, 0], 'x-', filename[5])
    plot_trajectory(L, t_tot, dt, ys, None, wxs[:, 1], 'y-', filename[6])
    plot_trajectory(L, t_tot, dt, zs, None, wxs[:, 2], 'z-', filename[7])

    plt.show()
Example #21
                        help='Filter sentences. "L" for line-scores and "B" for box-scores')

    return parser.parse_args()


if __name__ == '__main__':

    args = get_parser()
    cfg.k = args.k
    cfg.emit_epoch = args.e
    cfg.trans_epoch = args.t
    cfg.null_mode = args.m
    cfg.no_num = (args.n > 0)
    cfg.null_ratio = args.r
    cfg.jobs = args.j
    cfg.filter = args.f

    nltk.download('punkt')
    nltk.download('wordnet')
    # cfg.print_info()

    trainer = RWTrainer(cfg)
    todo = args.z

    if todo == 'train':
        trainer.train()
    elif todo == 'test':
        trainer.random_test()

    measure('END')