Exemple #1
0
def answer(topic=None):
    """
    Main rendering function, it processes incoming weather queries.
    Depending on user agent it returns output in HTML or ANSI format.

    Incoming data:
        request.args
        request.headers
        request.remote_addr
        request.referrer
        request.query_string
    """

    user_agent = request.headers.get('User-Agent', '').lower()
    html_needed = is_html_needed(user_agent)
    options = parse_args(request.args)

    # Browsers probe for touch icons; answer with an empty body instead of a 404.
    if topic in ['apple-touch-icon-precomposed.png', 'apple-touch-icon.png',
                 'apple-touch-icon-120x120-precomposed.png']:
        return ''

    # ":last" replays the previous query stored for this client id (cookie).
    request_id = request.cookies.get('id')
    if topic is not None and topic.lstrip('/') == ':last':
        if request_id:
            topic = last_query(request_id)
        else:
            return "ERROR: you have to set id for your requests to use /:last\n"
    else:
        if request_id:
            save_query(request_id, topic)

    if request.method == 'POST':
        process_post_request(request, html_needed)
        if html_needed:
            return redirect("/")
        return "OK\n"

    # A ?topic=... query parameter is redirected to the canonical /<topic> URL.
    if 'topic' in request.args:
        return redirect("/%s" % request.args.get('topic'))

    if topic is None:
        topic = ":firstpage"

    if topic.startswith(':shell-x/'):
        return _proxy()

    # Rate-limit only multi-term ("+") queries, which are the expensive ones.
    ip_address = get_request_ip(request)
    if '+' in topic:
        not_allowed = LIMITS.check_ip(ip_address)
        if not_allowed:
            return "429 %s\n" % not_allowed, 429

    # Reuse html_needed computed above; the original recomputed
    # is_html_needed(user_agent) into a shadow variable here.
    result, found = cheat_wrapper(topic, request_options=options, html=html_needed)
    if 'Please come back in several hours' in result and html_needed:
        return MALFORMED_RESPONSE_HTML_PAGE

    log_query(ip_address, found, topic, user_agent)
    return result
Exemple #2
0
def main():
    """Train a Prover model.

    Builds the train/valid dataloaders, the RMSprop optimizer and an LR
    scheduler, optionally resumes from a checkpoint, then runs the
    train / save / validate / schedule loop for opts.num_epochs epochs.
    """

    # parse the options
    opts = parse_args()

    # create the dataloaders; without a validation split, train on train+valid
    dataloader = {'train': create_dataloader('train_valid' if opts.no_validation else 'train', opts),
                  'valid': create_dataloader('valid', opts)}

    # create the model
    model = Prover(opts)
    model.to(opts.device)

    # create the optimizer
    optimizer = torch.optim.RMSprop(model.parameters(), lr=opts.learning_rate,
                                    momentum=opts.momentum,
                                    weight_decay=opts.l2)
    # StepLR when there is no validation loss to monitor, plateau-based otherwise
    if opts.no_validation:
        scheduler = StepLR(optimizer, step_size=opts.lr_reduce_steps, gamma=0.1)
    else:
        scheduler = ReduceLROnPlateau(optimizer, patience=opts.lr_reduce_patience, verbose=True)

    # load the checkpoint (identity test instead of `!= None`)
    start_epoch = 0
    if opts.resume is not None:
        log('loading model checkpoint from %s..' % opts.resume)
        if opts.device.type == 'cpu':
            checkpoint = torch.load(opts.resume, map_location='cpu')
        else:
            checkpoint = torch.load(opts.resume)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['n_epoch'] + 1
        model.to(opts.device)

    agent = Agent(model, optimizer, dataloader, opts)

    for n_epoch in range(start_epoch, start_epoch + opts.num_epochs):
        log('EPOCH #%d' % n_epoch)

        # training
        loss_train = agent.train(n_epoch)

        # save the model checkpoint
        if n_epoch % opts.save_model_epochs == 0:
            agent.save(n_epoch, opts.checkpoint_dir)

        # validation
        if not opts.no_validation:
            loss_valid = agent.valid(n_epoch)

        # reduce the learning rate
        if opts.no_validation:
            scheduler.step()
        else:
            scheduler.step(loss_valid)
Exemple #3
0
def main(sys_argv: List[str] = None):
    """Entry point: parse CLI arguments and dispatch to the requested task.

    Args:
        sys_argv: argument list; defaults to ``sys.argv[1:]`` when None.

    Raises:
        ValueError: if ``args.meta`` names an unsupported mode.
    """
    if sys_argv is None:
        sys_argv = sys.argv[1:]
    conf = parse_args(sys_argv)
    args = DotMap(conf, _dynamic=False)
    if args.meta == 'training':
        training_main(args)
    else:
        # Fail loudly with context instead of a bare ValueError.
        raise ValueError("unsupported meta mode: %r (expected 'training')" % args.meta)
Exemple #4
0
def answer(topic=None):
    """
    Main rendering function, it processes incoming weather queries.
    Depending on user agent it returns output in HTML or ANSI format.

    Incoming data:
        request.args
        request.headers
        request.remote_addr
        request.referrer
        request.query_string
    """

    user_agent = request.headers.get('User-Agent', '').lower()
    html_needed = is_html_needed(user_agent)
    options = parse_args(request.args)

    # Browsers probe for touch icons; answer with an empty body instead of a 404.
    if topic in ['apple-touch-icon-precomposed.png', 'apple-touch-icon.png',
                 'apple-touch-icon-120x120-precomposed.png']:
        return ''

    # ":last" replays the previous query stored for this client id (cookie).
    request_id = request.cookies.get('id')
    if topic is not None and topic.lstrip('/') == ':last':
        if request_id:
            topic = last_query(request_id)
        else:
            return "ERROR: you have to set id for your requests to use /:last\n"
    else:
        if request_id:
            save_query(request_id, topic)

    if request.method == 'POST':
        process_post_request(request, html_needed)
        if html_needed:
            return redirect("/")
        return "OK\n"

    # A ?topic=... query parameter is redirected to the canonical /<topic> URL.
    if 'topic' in request.args:
        return redirect("/%s" % request.args.get('topic'))

    if topic is None:
        topic = ":firstpage"

    if topic.startswith(':shell-x/'):
        return _proxy()

    # Rate-limit only multi-term ("+") queries, which are the expensive ones.
    ip_address = get_request_ip(request)
    if '+' in topic:
        not_allowed = LIMITS.check_ip(ip_address)
        if not_allowed:
            return "429 %s\n" % not_allowed, 429

    # Reuse html_needed computed above instead of calling is_html_needed() twice.
    result, found = cheat_wrapper(topic, request_options=options, html=html_needed)

    log_query(ip_address, found, topic, user_agent)
    return result
def main(img_path):
    """Run floorplan inference on a single image and reconstruct the plan.

    Loads model weights from ``checkpoint.pth`` (onto CPU), predicts corner /
    icon / room heatmaps, visualizes the intermediate maps, and feeds the
    per-type corner heatmaps into ``reconstructFloorplan``.

    Args:
        img_path: path to the input floorplan image.
    """
    options = parse_args()
    model = Model(options)
    # Always map weights to CPU; inference below does not require a GPU.
    model.load_state_dict(
        torch.load('checkpoint.pth', map_location=torch.device('cpu')))

    corner_pred, icon_pred, room_pred = model(
        torch.tensor(load_image(img_path)))

    # Icon/room predictions are softmax-normalized over the last (class) dim;
    # corner predictions are used as raw scores.
    corner_heatmaps = corner_pred[0].detach().cpu().numpy()
    icon_heatmaps = torch.nn.functional.softmax(icon_pred[0],
                                                dim=-1).detach().cpu().numpy()
    room_heatmaps = torch.nn.functional.softmax(room_pred[0],
                                                dim=-1).detach().cpu().numpy()

    # Channel layout of corner_heatmaps: NUM_WALL_CORNERS wall channels first,
    # then 4 door-corner channels, then 4 icon-corner channels at the end.
    wallCornerHeatmaps = corner_heatmaps[:, :, :NUM_WALL_CORNERS]
    doorCornerHeatmaps = corner_heatmaps[:, :,
                                         NUM_WALL_CORNERS:NUM_WALL_CORNERS + 4]
    iconCornerHeatmaps = corner_heatmaps[:, :, -4:]

    # Debug/visualization maps: per-pixel max score and argmax class labels.
    maps = {
        'original': cv2.imread(img_path),
        'corner_heatmaps': corner_heatmaps.max(-1),
        'icon_heatmaps': icon_heatmaps.max(-1),
        'room_heatmaps': room_heatmaps.max(-1),
        'corner_pred':
        np.squeeze(corner_pred.max(-1)[1].detach().cpu().numpy()),
        'icon_pred': np.squeeze(icon_pred.max(-1)[1].detach().cpu().numpy()),
        'room_pred': np.squeeze(room_pred.max(-1)[1].detach().cpu().numpy()),
        'wallCornerHeatmaps': wallCornerHeatmaps.max(-1),
        'doorCornerHeatmaps': doorCornerHeatmaps.max(-1),
        'iconCornerHeatmaps': iconCornerHeatmaps.max(-1),
    }
    np_print(maps)
    plot_images(maps)

    # NOTE(review): -1 / None arguments presumably mean "use internal
    # defaults" inside reconstructFloorplan — confirm against its definition.
    reconstructFloorplan(wallCornerHeatmaps,
                         doorCornerHeatmaps,
                         iconCornerHeatmaps,
                         icon_heatmaps,
                         room_heatmaps,
                         output_prefix='output-',
                         densityImage=None,
                         gt_dict=None,
                         gt=False,
                         gap=-1,
                         distanceThreshold=-1,
                         lengthThreshold=-1,
                         debug_prefix='test',
                         heatmapValueThresholdWall=None,
                         heatmapValueThresholdDoor=None,
                         heatmapValueThresholdIcon=None,
                         enableAugmentation=True)
Exemple #6
0
def answer(topic=None):
    """
    Main rendering function, it processes incoming weather queries.
    Depending on user agent it returns output in HTML or ANSI format.

    Incoming data:
        request.args
        request.headers
        request.remote_addr
        request.referrer
        request.query_string
    """

    user_agent = request.headers.get('User-Agent', '').lower()
    html_needed = is_html_needed(user_agent)
    options = parse_args(request.args)

    # ":last" replays the previous query stored for this client id (cookie).
    request_id = request.cookies.get('id')
    if topic is not None and topic.lstrip('/') == ':last':
        if request_id:
            topic = last_query(request_id)
        else:
            return "ERROR: you have to set id for your requests to use /:last\n"
    else:
        if request_id:
            save_query(request_id, topic)

    if request.method == 'POST':
        process_post_request(request, html_needed)
        if html_needed:
            return redirect("/")
        return "OK\n"

    # A ?topic=... query parameter is redirected to the canonical /<topic> URL.
    if 'topic' in request.args:
        return redirect("/%s" % request.args.get('topic'))

    if topic is None:
        topic = ":firstpage"

    # Rate-limit only multi-term ("+") queries, which are the expensive ones.
    ip_address = get_request_ip(request)
    if '+' in topic:
        not_allowed = LIMITS.check_ip(ip_address)
        if not_allowed:
            return "429 %s\n" % not_allowed, 429

    # Reuse html_needed computed above instead of calling is_html_needed() twice.
    result, found = cheat_wrapper(topic, request_options=options, html=html_needed)

    log_query(ip_address, found, topic, user_agent)
    return result
Exemple #7
0
def main(args=None):
    """Parse the command line, run the analysis, and — when the selected
    sub-command is ``plot`` — save or display the resulting figure."""
    parsed = parse_args(args)
    outcome = analyze(parsed)

    if parsed.func.__name__ != 'plot':
        return
    # For the plot sub-command, ``outcome`` is a pyplot-like object.
    if parsed.output:
        outcome.savefig(parsed.output)
        return
    try:
        outcome.show()
    except KeyboardInterrupt:
        exit(0)
Exemple #8
0
def parse_cmdline(args):
    """
    Parses command line arguments and returns
    query and request_options
    """

    # No arguments at all: print usage and exit cleanly.
    if not args:
        show_usage()
        sys.exit(0)

    # Treat the joined arguments as the path+query of a dummy URL so that
    # urlparse splits the query string off for us.
    joined = " ".join(args)
    url_parts = urlparse.urlparse("https://srv:0/%s" % joined)
    query_params = urlparse.parse_qs(url_parts.query, keep_blank_values=True)
    request_options = options.parse_args(query_params)

    query = url_parts.path.lstrip("/")
    # An empty query falls back to the first page.
    return (query or ":firstpage"), request_options
Exemple #9
0
def config_init():
    """Assemble the run configuration.

    Chains the Lightning Trainer parser, the project-common parser and the
    model-specific parser, then seeds the RNGs and derives the checkpoint,
    log and ground-truth paths from the parsed values.
    """
    arg_parser = argparse.ArgumentParser(conflict_handler='resolve')

    # Lightning Trainer flags first, then common flags, then model flags.
    arg_parser = pl.Trainer.add_argparse_args(arg_parser)
    arg_parser = parse_args(arg_parser)
    arg_parser = LightningSystem.add_model_specific_args(arg_parser)

    config = arg_parser.parse_args()
    if config.seed >= 0:
        utils.set_seed(config.seed)

    config.model_name = config.model_name + config.suffix
    config.save_path = f"./ckpt/{config.model_name}/"
    # NOTE(review): `is False` deliberately preserved — a None value would
    # skip directory creation here, unlike a plain truthiness test.
    if config.test is False:
        os.makedirs(config.save_path, exist_ok=True)

    config.log_path = os.path.join(config.log_path, config.model_name + ".txt")
    config.gt_path = os.path.join('data', config.dataset_name, 'gt.json')
    return config
Exemple #10
0
            "is_synthetic",
            "tactic_actions",
            "tactic_str",
        ]
        data_batch = {key: [] for key in fields}
        for example in batch:
            for key, value in example.items():
                if key not in fields:
                    continue
                data_batch[key].append(value)
        return data_batch

    ds = ProofStepsData(split, opts)
    return DataLoader(
        ds,
        opts.batchsize,
        shuffle=split.startswith("train"),
        collate_fn=merge,
        num_workers=opts.num_workers,
    )


if __name__ == "__main__":
    # Smoke test: build the training dataloader, iterate it once end to end,
    # print the first batch for inspection, and show progress while iterating.
    opts = parse_args()
    loader = create_dataloader("train", opts)
    bar = ProgressBar(max_value=len(loader))
    for i, data_batch in enumerate(loader):
        if i == 0:
            print(data_batch)
        bar.update(i)
Exemple #11
0
def main():
    """Train a visual-relationship model end to end.

    Builds the train/valid/test dataloaders, instantiates the model selected
    by ``args.model``, runs the epoch loop (train, optional validation,
    per-epoch checkpointing, LR scheduling) and finally evaluates on the
    test split.
    """
    args = parse_args()

    dataloader_train = create_dataloader(args.train_split, True, args)
    dataloader_valid = create_dataloader('valid', True, args)
    dataloader_test = create_dataloader('test', True, args)
    print('%d batches of training examples' % len(dataloader_train))
    print('%d batches of validation examples' % len(dataloader_valid))
    print('%d batches of testing examples' % len(dataloader_test))

    # Model selection by name; PPRFCN is the fallback.
    phrase_encoder = RecurrentPhraseEncoder(300, 300)
    if args.model == 'drnet':
        model = DRNet(phrase_encoder, args.feature_dim, args.pretrained)
    elif args.model == 'vtranse':
        model = VtransE(phrase_encoder, args.visual_feature_size, args.predicate_embedding_dim)
    elif args.model == 'vipcnn':
        model = VipCNN(roi_size=args.roi_size, backbone=args.backbone)
    else:
        model = PPRFCN(backbone=args.backbone)
    model.cuda()
    print(model)
    criterion = nn.BCEWithLogitsLoss()
    criterion.cuda()

    optimizer = torch.optim.RMSprop([p for p in model.parameters() if p.requires_grad], lr=args.learning_rate,
                                    momentum=args.momentum,
                                    weight_decay=args.l2)
    # Plateau scheduler needs a validation loss to monitor; StepLR otherwise.
    if args.train_split == 'train':
        scheduler = ReduceLROnPlateau(optimizer, patience=4, verbose=True)
    else:
        scheduler = StepLR(optimizer, step_size=args.patience, gamma=0.1)

    # Resume from a checkpoint if requested (identity test instead of `!= None`).
    start_epoch = 0
    if args.resume is not None:
        print(' => loading model checkpoint from %s..' % args.resume)
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model.cuda()
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']

    best_acc = -1.

    for epoch in range(start_epoch, start_epoch + args.n_epochs):
        print('epoch #%d' % epoch)

        print('training..')
        loss, acc = train(model, criterion, optimizer, dataloader_train, epoch, args)
        print('\n\ttraining loss = %.4f' % loss)
        print('\ttraining accuracy = %.3f' % acc)

        if args.train_split != 'train_valid':
            print('validating..')
            # NOTE: `loss` is rebound to the validation loss here; the
            # plateau scheduler step below relies on this.
            loss, accs = test('valid', model, criterion, dataloader_valid, epoch, args)
            print('\n\tvalidation loss = %.4f' % loss)
            print('\tvalidation accuracy = %.3f' % accs['overall'])
            for predi in accs:
                if predi != 'overall':
                    print('\t\t%s: %.3f' % (predi, accs[predi]))

        # Serialize from CPU so the checkpoint loads on any device.
        checkpoint_filename = os.path.join(args.log_dir, 'checkpoints/model_%02d.pth' % epoch)
        model.cpu()
        torch.save({'epoch': epoch + 1,
                    'args': args,
                    'state_dict': model.state_dict(),
                    'accuracy': acc,
                    'optimizer' : optimizer.state_dict(),
                   }, checkpoint_filename)
        model.cuda()

        # `acc` is the *training* accuracy — the best checkpoint is chosen on it.
        if args.train_split != 'train_valid' and best_acc < acc:
            best_acc = acc
            shutil.copyfile(checkpoint_filename, os.path.join(args.log_dir, 'checkpoints/model_best.pth'))
            shutil.copyfile(os.path.join(args.log_dir, 'predictions/pred_%02d.pickle' % epoch),
                            os.path.join(args.log_dir, 'predictions/pred_best.pickle'))

        if args.train_split == 'train':
            scheduler.step(loss)
        else:
            scheduler.step()

    print('testing..')
    loss, accs = test('test', model, criterion, dataloader_test, None, args)
    print('\n\ttesting loss = %.4f' % loss)
    print('\ttesting accuracy = %.3f' % accs['overall'])
    for predi in accs:
        if predi != 'overall':
            print('\t\t%s: %.3f' % (predi, accs[predi]))
Exemple #12
0
def main():
    """Train a policy with the algorithm selected by ``--alg`` (ES, PPO,
    ESPPO, MAXPPO or ALTPPO) over ``--n_trials`` trials, then aggregate
    per-trial rewards/times and write the reward plot, final weights and a
    results summary into the experiment directory.

    With ``--render``, loads previously saved weights, runs one rendered
    episode, prints its reward and returns without training.
    """
    args = options.parse_args()
    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    gym_logger.setLevel(logging.CRITICAL)
    env_func = partial(get_env, args=args)
    env = get_env(args)
    reward_goal = get_goal(args)
    # Stop early after this many consecutive evaluations at/above the goal.
    consecutive_goal_max = 10
    max_iteration = args.epoch
    all_rewards = []
    all_times = []
    all_totals = []
    for trial in range(args.n_trials):
        policy = policies.get_policy(args, env)
        if args.alg == 'ES':
            run_func = partial(envs.run_env_ES,
                               policy=policy,
                               env_func=env_func)
            alg = ESModule(
                policy,
                run_func,
                population_size=args.population_size,  # HYPERPARAMETER
                sigma=args.sigma,  # HYPERPARAMETER
                learning_rate=args.lr,  # HYPERPARAMETER TODO:CHANGE
                threadcount=args.population_size)

        elif args.alg == 'PPO':
            run_func = partial(envs.run_env_PPO,
                               env_func=env_func)  # TODO: update
            alg = PPOModule(
                policy,
                run_func,
                n_updates=args.n_updates,  # HYPERPARAMETER
                batch_size=args.batch_size,  # HYPERPARAMETER
                max_steps=args.max_steps,
                gamma=args.gamma,
                clip=args.clip,
                ent_coeff=args.ent_coeff,
                learning_rate=args.lr)  # TODO: CHANGE

        elif args.alg == 'ESPPO':
            run_func = partial(envs.run_env_PPO, env_func=env_func)

            alg = ESPPOModule(
                policy,
                run_func,
                population_size=args.population_size,  # HYPERPARAMETER
                sigma=args.sigma,  # HYPERPARAMETER
                n_updates=args.n_updates,  # HYPERPARAMETER
                batch_size=args.batch_size,  # HYPERPARAMETER
                max_steps=args.max_steps,
                gamma=args.gamma,
                clip=args.clip,
                ent_coeff=args.ent_coeff,
                n_seq=args.n_seq,
                ppo_learning_rate=args.ppo_lr,
                es_learning_rate=args.es_lr,
                threadcount=args.population_size)

        elif args.alg == 'MAXPPO':
            run_func = partial(envs.run_env_PPO, env_func=env_func)

            alg = MaxPPOModule(
                policy,
                run_func,
                population_size=args.population_size,  # HYPERPARAMETER
                sigma=args.sigma,  # HYPERPARAMETER
                n_updates=args.n_updates,  # HYPERPARAMETER
                batch_size=args.batch_size,  # HYPERPARAMETER
                max_steps=args.max_steps,
                gamma=args.gamma,
                clip=args.clip,
                ent_coeff=args.ent_coeff,
                n_seq=args.n_seq,
                ppo_learning_rate=args.ppo_lr,
                threadcount=args.population_size)

        elif args.alg == 'ALTPPO':
            run_func = partial(envs.run_env_PPO, env_func=env_func)

            alg = AltPPOModule(
                policy,
                run_func,
                population_size=args.population_size,  # HYPERPARAMETER
                sigma=args.sigma,  # HYPERPARAMETER
                n_updates=args.n_updates,  # HYPERPARAMETER
                batch_size=args.batch_size,  # HYPERPARAMETER
                max_steps=args.max_steps,
                gamma=args.gamma,
                clip=args.clip,
                ent_coeff=args.ent_coeff,
                n_alt=args.n_alt,
                es_learning_rate=args.es_lr,
                ppo_learning_rate=args.ppo_lr,
                threadcount=args.population_size)

        # Render mode: load saved weights, run one episode, and exit.
        if args.render:
            with open(os.path.join(args.directory, 'weights.pkl'), 'rb') as fp:
                weights = pickle.load(fp)
                policy.load_state_dict(weights)

            if args.alg == 'ES':
                total_reward = run_func(weights, stochastic=False, render=True)
            else:
                total_reward = run_func(policy,
                                        stochastic=False,
                                        render=True,
                                        reward_only=True)
            # BUGFIX: was `total_rewards` (undefined name) — raised NameError.
            print(f"Total rewards from episode: {total_reward}")
            return

        exp_dir = os.path.join(args.directory, alg.model_name)
        if not os.path.exists(exp_dir):
            os.makedirs(exp_dir)

        start = time.time()
        consecutive_goal_count = 0
        iteration = 0
        rewards = []
        while True:
            if iteration >= max_iteration:
                break
            weights = alg.step()
            # Evaluate deterministically every 10 iterations.
            if (iteration + 1) % 10 == 0:
                if args.alg == 'ES':
                    test_reward = run_func(weights,
                                           stochastic=False,
                                           render=False)
                else:
                    test_reward = run_func(policy,
                                           stochastic=False,
                                           render=False,
                                           reward_only=True)
                rewards.append(test_reward)
                print('iter %d. reward: %f' % (iteration + 1, test_reward))

                if consecutive_goal_max and reward_goal:
                    consecutive_goal_count = consecutive_goal_count + 1 if test_reward >= reward_goal else 0
                    if consecutive_goal_count >= consecutive_goal_max:
                        break
            iteration += 1
        end = time.time() - start
        # Final deterministic evaluation of this trial's weights.
        if args.alg == 'ES':
            total_reward = run_func(weights, stochastic=False, render=False)
        else:
            total_reward = run_func(policy,
                                    stochastic=False,
                                    render=False,
                                    reward_only=True)
        all_rewards.append(rewards)
        all_times.append(end)
        all_totals.append(total_reward)
        print(f"Reward from final weights: {total_reward}")
        print(f"Time to completion: {end}")

    # Pad shorter trials (early-stopped) with the goal reward so the
    # per-iteration arrays have equal length and can be stacked.
    max_len = 0
    for rewards in all_rewards:
        if len(rewards) > max_len:
            max_len = len(rewards)
    for rewards in all_rewards:
        while len(rewards) < max_len:
            rewards.append(reward_goal)
        rewards = np.array(rewards)
    all_rewards = np.array(all_rewards)
    rewards_mean = np.mean(all_rewards, axis=0)
    rewards_std = np.std(all_rewards, axis=0)
    total_mean = np.mean(all_totals)
    time_mean = np.mean(all_times)
    plt.errorbar(np.arange(max_len),
                 rewards_mean,
                 yerr=rewards_std,
                 label='rewards')
    plt.legend(loc=4)
    plt.grid(True)
    plt.tight_layout()
    path = os.path.join(exp_dir, "rewards_plot.png")
    plt.savefig(path)
    plt.close()
    np.savetxt(os.path.join(exp_dir, 'rewards.txt'), rewards_mean)
    pickle.dump(weights, open(os.path.join(exp_dir, 'weights.pkl'), 'wb'))
    # BUGFIX: the results file was never closed — use a context manager.
    with open(os.path.join(exp_dir, "results.txt"), 'w') as out_file:
        print(f"Average rewards from final weights: {total_mean}")
        msg = f"Average rewards from final weights: {total_mean}"
        msg += "\n"
        print(f"Average time to completion: {time_mean}")
        msg += f"Average time to completion: {time_mean}"
        msg += "\n"
        print(f"Results saved at: {exp_dir}")
        out_file.write(msg)
        out_file.flush()
Exemple #13
0
            if vocab_subset is not None:
                # filter log_probs
                filter_logprob_indices, index_list = model.init_indices_for_filter_logprobs(
                    vocab_subset)
                filtered_log_probs_list = model.filter_logprobs(
                    original_log_probs_list, filter_logprob_indices)
                print(filtered_log_probs_list)
            else:
                filtered_log_probs_list = original_log_probs_list

            ret = {}
            # rank over the subset of the vocab (if defined) for the SINGLE masked tokens
            if masked_indices and len(masked_indices) > 0:
                ret = evaluation_metrics.get_ranking(
                    filtered_log_probs_list[0],
                    masked_indices,
                    model.vocab,
                    index_list=index_list)

            # prediction and perplexity for the whole softmax
            # print_sentence_predictions(original_log_probs_list[0], token_ids, model.vocab, masked_indices=masked_indices

    for r in ret:
        print("%s %s" % (r, ret[r]))


if __name__ == '__main__':
    # Build the eval-generation argument parser, parse the CLI, and run.
    eval_parser = options.get_eval_generation_parser()
    parsed_args = options.parse_args(eval_parser)
    main(parsed_args)
Exemple #14
0
def main():
    """Train a SimpleLanguageOnlyModel.

    Builds the train/valid/test dataloaders, runs the epoch loop with
    training + validation, checkpoints every epoch (keeping a copy of the
    best one by training accuracy), and evaluates on the test split.
    """
    args = parse_args()

    dataloader_train = create_dataloader(args.train_split, False, args)
    dataloader_valid = create_dataloader('valid', False, args)
    dataloader_test = create_dataloader('test', False, args)
    print('%d batches of training examples' % len(dataloader_train))
    print('%d batches of validation examples' % len(dataloader_valid))
    print('%d batches of testing examples' % len(dataloader_test))

    phrase_encoder = RecurrentPhraseEncoder(300, 300)
    model = SimpleLanguageOnlyModel(phrase_encoder, args.feature_dim, 9)
    criterion = nn.BCEWithLogitsLoss()
    if torch.cuda.is_available():
        model.cuda()
        criterion.cuda()

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.learning_rate,
                                    momentum=args.momentum,
                                    weight_decay=args.l2)
    # StepLR when training on train+valid (no validation loss to monitor),
    # otherwise reduce-on-plateau driven by the validation loss.
    if args.train_split == 'train_valid':
        scheduler = StepLR(optimizer, step_size=8, gamma=0.1)
    else:
        scheduler = ReduceLROnPlateau(optimizer, patience=5, verbose=True)

    best_acc = -1.

    for epoch in range(args.n_epochs):
        print('epoch #%d' % epoch)

        print('training..')
        loss, acc = train(model, criterion, optimizer, dataloader_train, epoch,
                          args)
        print('\n\ttraining loss = %.4f' % loss)
        print('\ttraining accuracy = %.3f' % acc)

        print('validating..')
        # NOTE: `loss` is rebound to the validation loss here; the scheduler
        # step at the bottom of the loop relies on this.
        loss, accs = test('valid', model, criterion, dataloader_valid, epoch,
                          args)
        print('\n\tvalidation loss = %.4f' % loss)
        print('\tvalidation accuracy = %.3f' % accs['overall'])
        for predi in accs:
            if predi != 'overall':
                print('\t\t%s: %.3f' % (predi, accs[predi]))

        # Serialize from CPU so the checkpoint loads on any device.
        checkpoint_filename = os.path.join(
            args.log_dir, 'checkpoints/model_%02d.pth' % epoch)
        model.cpu()
        torch.save(
            {
                'epoch': epoch + 1,
                'args': args,
                'state_dict': model.state_dict(),
                'accuracy': acc,
                'optimizer': optimizer.state_dict(),
            }, checkpoint_filename)
        if torch.cuda.is_available():
            model.cuda()
        # `acc` is the *training* accuracy — best checkpoint is selected on it.
        if best_acc < acc:
            best_acc = acc
            shutil.copyfile(
                checkpoint_filename,
                os.path.join(args.log_dir, 'checkpoints/model_best.pth'))
            shutil.copyfile(
                os.path.join(args.log_dir,
                             'predictions/pred_%02d.pickle' % epoch),
                os.path.join(args.log_dir, 'predictions/pred_best.pickle'))

        if args.train_split == 'train_valid':
            scheduler.step()
        else:
            scheduler.step(loss)

    print('testing..')
    # NOTE(review): `epoch` here is the last loop value; this raises NameError
    # if args.n_epochs == 0 — confirm n_epochs >= 1 is enforced by parse_args.
    loss, accs = test('test', model, criterion, dataloader_test, epoch, args)
    print('\n\ttesting loss = %.4f' % loss)
    print('\ttesting accuracy = %.3f' % accs['overall'])
    for predi in accs:
        if predi != 'overall':
            print('\t\t%s: %.3f' % (predi, accs[predi]))
Exemple #15
0
            print("GG! Best accuracy {:f}".format(max_acc))

        if ((epoch + 1) % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir,
                                   '{:d}.tar'.format(epoch))
            torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)
    return model


# --- main entry point (script continues beyond this excerpt) ---
if __name__ == '__main__':
    # Fix the numpy random seed for reproducibility.
    np.random.seed(10)

    # Parse command-line arguments.
    params = parse_args()
    print('--- Training ---\n')
    print(params)

    # Derive the checkpoint directory from save_dir/name and create it.
    params.checkpoint_dir = '%s/checkpoints/%s' % (params.save_dir,
                                                   params.name)
    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)

    # Build dataset file paths for the seen train/val domains.
    print('\n--- Prepare dataloader ---')
    print('\ttrain with seen domain {}'.format(params.dataset))
    print('\tval with seen domain {}'.format(params.testset))
    base_file = os.path.join(params.data_dir, params.dataset, 'base.json')
    val_file = os.path.join(params.data_dir, params.testset, 'val.json')
Exemple #16
0
import os
import logging
import numpy as np
import random
import pickle

import torch

# Env
from data_loaders import *
from options import parse_args
from train_test import train, test


### 1. Initialize parser and device
opt = parse_args()
# Use the first configured GPU id if any were given, otherwise fall back to CPU.
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
print("Using device:", device)
# Create the nested checkpoints_dir / exp_name / model_name directories as needed.
if not os.path.exists(opt.checkpoints_dir): os.makedirs(opt.checkpoints_dir)
if not os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name)): os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name))
if not os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name)): os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name))

### 2. Initialize data
# Flags derived from the task/mode strings are baked into the split filename below.
ignore_missing_histype = 1 if 'grad' in opt.task else 0
ignore_missing_moltype = 1 if 'omic' in opt.mode else 0
# VGG-feature runs read pre-extracted patch splits; others read whole-slide splits.
use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
use_rnaseq = '_rnaseq' if opt.use_rnaseq else ''

# NOTE(review): pickle.load on a data file — only load pickles from trusted sources.
data_cv_path = '%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, roi_dir, ignore_missing_moltype, ignore_missing_histype, opt.use_vgg_features, use_rnaseq)
print("Loading %s" % data_cv_path)
data_cv = pickle.load(open(data_cv_path, 'rb'))
Exemple #17
0
def main(args):

    """Resolve one DNS query, or a CSV batch of queries, and print results.

    args is an argv-style list; args[1:] is parsed into (qname, qtype,
    qclass, csv).  Returns the rcode of the final DNS response.  Raises
    UsageError for an invalid qtype/qclass and ErrorMessage for server
    resolution or network failures.
    """

    sys.excepthook = excepthook
    random_init()

    qname, qtype, qclass, csv = parse_args(args[1:])

    # Map the textual record type/class (e.g. "A", "IN") to numeric values.
    try:
        qtype_val = qt.get_val(qtype)
    except KeyError:
        raise UsageError("ERROR: invalid query type: {}\n".format(qtype))

    try:
        qclass_val = qc.get_val(qclass)
    except KeyError:
        raise UsageError("ERROR: invalid query class: {}\n".format(qclass))
    if csv==0:
        # Single-query mode: qname is the domain name to resolve.
        query = DNSquery(qname, qtype_val, qclass_val)
        try:
            server_addr, port, family, _ = \
                         get_socketparams(options["server"], options["port"],
                                          options["af"], socket.SOCK_DGRAM)
        except socket.gaierror as e:
            raise ErrorMessage("bad server: %s (%s)" % (options["server"], e))

        request = query.get_message()
    else:
        # CSV mode: qname is a CSV file whose first column lists the names
        # to query; each is resolved over UDP in turn.
        doc = pd.read_csv(qname)
        X = doc.iloc[:,0].values.astype(str)
        print(len(X))
        for i in X:
            print(i)
            # Make the name fully qualified before building the query.
            i += "."
            query = DNSquery(i, qtype_val, qclass_val)
            try:
                server_addr, port, family, _ = \
                             get_socketparams(options["server"], options["port"],
                                              options["af"], socket.SOCK_DGRAM)
            except socket.gaierror as e:
                raise ErrorMessage("bad server: %s (%s)" % (options["server"], e))
            request = query.get_message()
            (responsepkt, responder_addr) = \
                      send_request_udp(request, server_addr, port, family,
                                       ITIMEOUT, RETRIES)
            response = DNSresponse(family, query, responsepkt)
            # NOTE(review): this assigns the SAME decoded answer to the whole
            # "get" column on every iteration, so only the last row's answer
            # survives; a per-row assignment was probably intended -- confirm.
            doc["get"] = str(response.decode_sections())
        doc.to_csv(qname,index=False)
    response = None

    # NOTE(review): in CSV mode the code below re-sends the LAST loop
    # `request` once more over UDP (and possibly TCP); confirm intentional.
    if not options["use_tcp"]:
        t1 = time.time()
        (responsepkt, responder_addr) = \
                      send_request_udp(request, server_addr, port, family,
                                       ITIMEOUT, RETRIES)
        t2 = time.time()
        if not responsepkt:
            raise ErrorMessage("No response from server")
        response = DNSresponse(family, query, responsepkt)
        if not response.tc:
            # Untruncated UDP answer: report timing and sanity-check that the
            # responder matches the server we asked (unless multicast/any).
            print(";; UDP response from %s, %d bytes, in %.3f sec" %
                  (responder_addr, response.msglen, (t2-t1)))
            if not is_multicast(server_addr) and \
               server_addr != "0.0.0.0" and responder_addr[0] != server_addr:
                print("WARNING: Response from unexpected address %s" %
                      responder_addr[0])

    # Fall back to TCP when explicitly requested, when the UDP answer was
    # truncated (TC bit), or as a TLS fallback when no response came back.
    if options["use_tcp"] or (response and response.tc) \
       or (options["tls"] and options["tls_fallback"] and not response):
        if response and response.tc:
            if options["ignore"]:
                print(";; UDP Response was truncated.")
            else:
                print(";; UDP Response was truncated. Retrying using TCP ...")
        if options["tls"] and options["tls_fallback"] and not response:
            print(";; TLS fallback to TCP ...")
        if not options["ignore"]:
            t1 = time.time()
            responsepkt = send_request_tcp(request, server_addr, port, family)
            t2 = time.time()
            response = DNSresponse(family, query, responsepkt)
            print(";; TCP response from %s, %d bytes, in %.3f sec" %
                  ((server_addr, port), response.msglen, (t2-t1)))

    response.print_all()
    dprint("Compression pointer dereferences=%d" % Stats.compression_cnt)

    return response.rcode
Exemple #18
0
def main():
    """Train, validate and test a visual-relationship detection model.

    Builds dataloaders for the configured splits, constructs the model
    selected by ``args.model`` (drnet / vtranse / vipcnn / pprfcn),
    optionally resumes from a checkpoint, trains for ``args.n_epochs``
    epochs with per-epoch checkpointing and best-model tracking, then
    reports accuracy on the test split.
    """
    args = parse_args()

    # One dataloader per split; the training split is configurable so that
    # "train_valid" (train + val merged) can be used for final runs.
    dataloader_train = create_dataloader(args.train_split, True, args)
    dataloader_valid = create_dataloader("valid", True, args)
    dataloader_test = create_dataloader("test", True, args)
    print("%d batches of training examples" % len(dataloader_train))
    print("%d batches of validation examples" % len(dataloader_valid))
    print("%d batches of testing examples" % len(dataloader_test))

    # Shared phrase encoder feeding whichever relation model was requested.
    phrase_encoder = RecurrentPhraseEncoder(300, 300)
    if args.model == "drnet":
        model = DRNet(phrase_encoder, args.feature_dim)
    elif args.model == "vtranse":
        model = VtransE(phrase_encoder, args.visual_feature_size,
                        args.predicate_embedding_dim)
    elif args.model == "vipcnn":
        model = VipCNN(roi_size=args.roi_size, backbone=args.backbone)
    else:
        model = PPRFCN(backbone=args.backbone)
    model.cuda()
    print(model)
    criterion = nn.BCEWithLogitsLoss()
    criterion.cuda()

    # Only optimize trainable parameters (frozen backbone weights are
    # skipped by the requires_grad filter).
    optimizer = torch.optim.RMSprop(
        [p for p in model.parameters() if p.requires_grad],
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.l2,
    )
    # Plateau-based LR decay when a held-out validation split exists;
    # otherwise a fixed step schedule (no validation loss to monitor).
    if args.train_split == "train":
        scheduler = ReduceLROnPlateau(optimizer, patience=4, verbose=True)
    else:
        scheduler = StepLR(optimizer, step_size=args.patience, gamma=0.1)

    start_epoch = 0
    # Fixed: compare to None with `is not` (identity), not `!=`.
    if args.resume is not None:
        print(" => loading model checkpoint from %s.." % args.resume)
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint["state_dict"])
        model.cuda()
        optimizer.load_state_dict(checkpoint["optimizer"])
        start_epoch = checkpoint["epoch"]

    best_acc = -1.0

    for epoch in range(start_epoch, start_epoch + args.n_epochs):
        print("epoch #%d" % epoch)

        print("training..")
        loss, acc = train(model, criterion, optimizer, dataloader_train, epoch,
                          args)
        print("\n\ttraining loss = %.4f" % loss)
        print("\ttraining accuracy = %.3f" % acc)

        if args.train_split != "train_valid":
            print("validating..")
            loss, accs = test("valid", model, criterion, dataloader_valid,
                              epoch, args)
            print("\n\tvalidation loss = %.4f" % loss)
            print("\tvalidation accuracy = %.3f" % accs["overall"])
            for predi in accs:
                if predi != "overall":
                    print("\t\t%s: %.3f" % (predi, accs[predi]))

        # Move to CPU before serializing so the checkpoint loads on any host.
        checkpoint_filename = os.path.join(
            args.log_dir, "checkpoints/model_%02d.pth" % epoch)
        model.cpu()
        torch.save(
            {
                "epoch": epoch + 1,
                "args": args,
                "state_dict": model.state_dict(),
                "accuracy": acc,
                "optimizer": optimizer.state_dict(),
            },
            checkpoint_filename,
        )
        model.cuda()

        # NOTE(review): best-model tracking compares TRAINING accuracy `acc`,
        # not the validation accuracy in `accs` -- confirm intentional.
        if args.train_split != "train_valid" and best_acc < acc:
            best_acc = acc
            shutil.copyfile(
                checkpoint_filename,
                os.path.join(args.log_dir, "checkpoints/model_best.pth"),
            )
            shutil.copyfile(
                os.path.join(args.log_dir,
                             "predictions/pred_%02d.pickle" % epoch),
                os.path.join(args.log_dir, "predictions/pred_best.pickle"),
            )

        # Plateau scheduler needs the monitored (validation) loss; StepLR
        # steps unconditionally.
        if args.train_split == "train":
            scheduler.step(loss)
        else:
            scheduler.step()

    print("testing..")
    loss, accs = test("test", model, criterion, dataloader_test, None, args)
    print("\n\ttesting loss = %.4f" % loss)
    print("\ttesting accuracy = %.3f" % accs["overall"])
    for predi in accs:
        if predi != "overall":
            print("\t\t%s: %.3f" % (predi, accs[predi]))
Exemple #19
0
        if ((epoch + 1) % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir,
                                   '{:d}.tar'.format(epoch + 1))
            model.save(outfile, epoch)
        outfile = os.path.join(params.checkpoint_dir, name + '_trlog')
        torch.save(trlog, outfile)
    return


# --- main function ---
if __name__ == '__main__':

    # set numpy random seed
    np.random.seed(10)
    # parse argument
    params = parse_args('train')
    if params.mode in ['onlytrain', 'train_and_test']:
        # Experiment name encodes shot/query counts, method, test set and
        # whether training-time augmentation is enabled.
        if params.train_aug == 'True':
            name = 'original_shot_' + str(params.n_shot) + '_query_' + str(
                params.n_query
            ) + '_' + params.method + '_' + params.testset + '_aug'

        elif params.train_aug == 'False':
            name = 'original_shot_' + str(params.n_shot) + '_query_' + str(
                params.n_query) + '_' + params.method + '_' + params.testset
        # NOTE(review): if train_aug is neither 'True' nor 'False', `name`
        # is never bound and the lines below raise NameError -- confirm the
        # option parser restricts its values.
        print('--- LFTNet training: {} ---\n'.format(name))
        print(params)

        # output and tensorboard dir
        params.tf_dir = '%s/log/%s' % (params.save_dir, name)
        params.checkpoint_dir = '%s/checkmodels/%s' % (params.save_dir, name)
def explain_relationnet():
    """Generate LRP heatmap explanations for a trained RelationNet model.

    Loads a RelationNet checkpoint for miniImagenet, wraps its feature and
    relation modules with LRP, then for a few episodes computes per-class
    relevance maps for every query image and saves the original images and
    heatmaps under <save_dir>/explanations/<name>/.
    """
    # print(sys.path)
    params = options.parse_args('test')
    feature_model = backbone.model_dict['ResNet10']
    # Hard-coded evaluation configuration (paths and model choice).
    params.method = 'relationnet'
    params.dataset = 'miniImagenet'  # name relationnet --testset miniImagenet
    params.name = 'relationnet'
    params.testset = 'miniImagenet'
    params.data_dir = '/home/sunjiamei/work/fewshotlearning/dataset/'
    params.save_dir = '/home/sunjiamei/work/fewshotlearning/CrossDomainFewShot-master/output'

    # Conv backbones use 84x84 inputs; ResNet backbones use 224x224.
    if 'Conv' in params.model:
        image_size = 84
    else:
        image_size = 224
    split = params.split
    n_query = 1
    # Episode sampler over the requested split (no augmentation at test time).
    loadfile = os.path.join(params.data_dir, params.testset, split + '.json')
    few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)
    data_datamgr = SetDataManager(image_size,
                                  n_query=n_query,
                                  **few_shot_params)
    data_loader = data_datamgr.get_data_loader(loadfile, aug=False)

    acc_all = []
    iter_num = 1000

    # model
    print('  build metric-based model')
    if params.method == 'protonet':
        model = ProtoNet(backbone.model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet(backbone.model_dict[params.model],
                            **few_shot_params)
    elif params.method == 'gnnnet':
        model = GnnNet(backbone.model_dict[params.model], **few_shot_params)
    elif params.method in ['relationnet', 'relationnet_softmax']:
        # RelationNet needs the non-pooled ("NP") conv variants.
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        else:
            feature_model = backbone.model_dict[params.model]
        loss_type = 'LRPmse'
        model = RelationNet(feature_model,
                            loss_type=loss_type,
                            **few_shot_params)
    else:
        raise ValueError('Unknown method')

    # Load the trained weights (a specific epoch, or the best checkpoint).
    checkpoint_dir = '%s/checkpoints/%s' % (params.save_dir, params.name)
    # print(checkpoint_dir)
    if params.save_epoch != -1:
        modelfile = get_assigned_file(checkpoint_dir, params.save_epoch)
    else:
        modelfile = get_best_file(checkpoint_dir)
        # print(modelfile)
    if modelfile is not None:
        tmp = torch.load(modelfile)
        try:
            model.load_state_dict(tmp['state'])
        except RuntimeError:
            print('warning! RuntimeError when load_state_dict()!')
            model.load_state_dict(tmp['state'], strict=False)
        except KeyError:
            # Older checkpoints store weights under 'model_state' and need
            # their running stats squeezed before loading.
            for k in tmp['model_state']:  ##### revise latter
                if 'running' in k:
                    tmp['model_state'][k] = tmp['model_state'][k].squeeze()
            model.load_state_dict(tmp['model_state'], strict=False)
        except:
            raise

    model = model.cuda()
    model.eval()
    model.n_query = n_query
    # ---test the accuracy on the test set to verify the model is loaded----
    acc = 0
    count = 0
    # for i, (x, y) in enumerate(data_loader):
    #   scores = model.set_forward(x)
    #   pred = scores.data.cpu().numpy().argmax(axis=1)
    #   y = np.repeat(range(model.n_way), n_query)
    #   acc += np.sum(pred == y)
    #   count += len(y)
    #   # print(1.0*acc/count)
    # print(1.0*acc/count)
    preset = lrp_presets.SequentialPresetA()

    # Wrap copies of the feature extractor and relation head with LRP hooks
    # so relevance can be propagated back through both stages.
    feature_model = copy.deepcopy(model.feature)
    lrp_wrapper.add_lrp(feature_model, preset=preset)
    relation_model = copy.deepcopy(model.relation_module)
    # print(relation_model)
    lrp_wrapper.add_lrp(relation_model, preset=preset)
    with open(
            '/home/sunjiamei/work/fewshotlearning/dataset/miniImagenet/class_to_readablelabel.json',
            'r') as f:
        class_to_readable = json.load(f)
    explanation_save_dir = os.path.join(params.save_dir, 'explanations',
                                        params.name)
    if not os.path.isdir(explanation_save_dir):
        os.makedirs(explanation_save_dir)
    for i, (x, y, p) in enumerate(data_loader):
        '''x is the images with shape as n_way, n_support + n_querry, 3, img_size, img_size
       y is the global labels of the images with shape as (n_way, n_support + n_query)
       p is the image path as a list of tuples, length is n_query+n_support,  each tuple element is with length n_way'''
        # Only explain the first 3 episodes.
        if i >= 3:
            break
        label_to_readableclass, query_img_path, query_gt_class = LRPutil.get_class_label(
            p, class_to_readable, model.n_query)
        # Class prototypes = mean of support embeddings per way.
        z_support, z_query = model.parse_feature(x, is_feature=False)
        z_support = z_support.contiguous()
        z_proto = z_support.view(model.n_way, model.n_support,
                                 *model.feat_dim).mean(1)
        # print(z_proto.shape)
        z_query = z_query.contiguous().view(model.n_way * model.n_query,
                                            *model.feat_dim)
        # print(z_query.shape)
        # get relations with metric function
        # Pair every query with every prototype by channel-concatenation.
        z_proto_ext = z_proto.unsqueeze(0).repeat(model.n_query * model.n_way,
                                                  1, 1, 1, 1)
        # print(z_proto_ext.shape)
        z_query_ext = z_query.unsqueeze(0).repeat(model.n_way, 1, 1, 1, 1)

        z_query_ext = torch.transpose(z_query_ext, 0, 1)
        # print(z_query_ext.shape)
        extend_final_feat_dim = model.feat_dim.copy()
        extend_final_feat_dim[0] *= 2
        relation_pairs = torch.cat((z_proto_ext, z_query_ext),
                                   2).view(-1, *extend_final_feat_dim)
        # print(relation_pairs.shape)
        relations = relation_model(relation_pairs)
        # print(relations)
        scores = relations.view(-1, model.n_way)
        preds = scores.data.cpu().numpy().argmax(axis=1)
        # print(preds.shape)
        relations = relations.view(-1, model.n_way)
        # print(relations)
        # Convert relation scores to logits before LRP (log-odds with a
        # scaling constant LOGIT_BETA defined in LRPutil).
        relations_sf = torch.softmax(relations, dim=-1)
        # print(relations_sf)
        relations_logits = torch.log(LRPutil.LOGIT_BETA * relations_sf /
                                     (1 - relations_sf))
        # print(relations_logits)
        # print(preds)
        relations_logits = relations_logits.view(-1, 1)
        relevance_relations = relation_model.compute_lrp(
            relation_pairs, target=relations_logits)
        # print(relevance_relations.shape)
        # print(model.feat_dim)
        # Keep only the query half of each concatenated (proto, query) pair.
        relevance_z_query = torch.narrow(relevance_relations, 1,
                                         model.feat_dim[0], model.feat_dim[0])
        # print(relevance_z_query.shape)
        relevance_z_query = relevance_z_query.view(
            model.n_query * model.n_way, model.n_way,
            *relevance_z_query.size()[1:])
        # print(relevance_z_query.shape)
        query_img = x.narrow(1, model.n_support,
                             model.n_query).view(model.n_way * model.n_query,
                                                 *x.size()[2:])
        # query_img_copy = query_img.view(model.n_way, model.n_query, *x.size()[2:])
        # print(query_img.shape)
        # For each class k, propagate that class's relevance back to pixels.
        for k in range(model.n_way):
            relevance_querry_cls = torch.narrow(relevance_z_query, 1, k,
                                                1).squeeze(1)
            # print(relevance_querry_cls.shape)
            relevance_querry_img = feature_model.compute_lrp(
                query_img.cuda(), target=relevance_querry_cls)
            # print(relevance_querry_img.max(), relevance_querry_img.min())
            # print(relevance_querry_img.shape)
            for j in range(model.n_query * model.n_way):
                predict_class = label_to_readableclass[preds[j]]
                true_class = query_gt_class[int(j % model.n_way)][int(
                    j // model.n_way)]
                explain_class = label_to_readableclass[k]
                img_name = query_img_path[int(j % model.n_way)][int(
                    j // model.n_way)].split('/')[-1]
                # NOTE(review): str.strip('.jpg') strips any of the chars
                # '.', 'j', 'p', 'g' from both ends, not the '.jpg' suffix;
                # names like 'pig.jpg' lose letters -- confirm acceptable.
                if not os.path.isdir(
                        os.path.join(explanation_save_dir, 'episode' + str(i),
                                     img_name.strip('.jpg'))):
                    os.makedirs(
                        os.path.join(explanation_save_dir, 'episode' + str(i),
                                     img_name.strip('.jpg')))
                save_path = os.path.join(explanation_save_dir,
                                         'episode' + str(i),
                                         img_name.strip('.jpg'))
                # Save the original query image once per (true, predicted)
                # class combination.
                if not os.path.exists(
                        os.path.join(
                            save_path,
                            true_class + '_' + predict_class + img_name)):
                    original_img = Image.fromarray(
                        np.uint8(
                            project(query_img[j].permute(1, 2,
                                                         0).cpu().numpy())))
                    original_img.save(
                        os.path.join(
                            save_path,
                            true_class + '_' + predict_class + img_name))

                # Render the relevance of query j for class k as a heatmap.
                img_relevance = relevance_querry_img.narrow(0, j, 1)
                print(predict_class, true_class, explain_class)
                # assert relevance_querry_cls[j].sum() != 0
                # assert img_relevance.sum()!=0
                hm = img_relevance.permute(0, 2, 3, 1).cpu().detach().numpy()
                hm = LRPutil.gamma(hm)
                hm = LRPutil.heatmap(hm)[0]
                hm = project(hm)
                hp_img = Image.fromarray(np.uint8(hm))
                hp_img.save(
                    os.path.join(
                        save_path,
                        true_class + '_' + explain_class + '_lrp_hm.jpg'))
Exemple #21
0
        if ((epoch + 1) % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir,
                                   '{:d}.tar'.format(epoch))
            torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)

    return model


# --- main function ---
if __name__ == '__main__':

    # set numpy random seed
    np.random.seed(10)

    # parser argument
    params = parse_args(
        'train')  #model_dict=conv4, conv6, resnet10, resnet18,resnet34
    print('--- baseline training: {} ---\n'.format(params.name))
    print(params)

    # output and tensorboard dir
    # Per-experiment log and checkpoint directories keyed by params.name.
    params.tf_dir = '%s/log/%s' % (params.save_dir, params.name)
    params.checkpoint_dir = '%s/checkpoints/%s' % (params.save_dir,
                                                   params.name)
    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)

    # dataloader
    print('\n--- prepare dataloader ---')
    # 'multi' trains on several seen domains while holding out params.testset.
    if params.dataset == 'multi':
        print('  train with multiple seen domains (unseen domain: {})'.format(
            params.testset))
Exemple #22
0
from torch.utils.data import DataLoader

import models
from options import parse_args
from utils import Logger, AverageMeter, ClassErrorMeter
from datasets import get_dataset
from utils.measure_v2 import measure

# Enable cuDNN autotuning (fastest kernels for fixed input shapes).
torch.backends.cudnn.benchmark = True

###########################
# DEFINE GLOBAL VARIABLES #
###########################

# parse arguments
args, model_args = parse_args()

# define logger
logdir = args.logdir
# read_only avoids creating/overwriting log files when only evaluating.
logger = Logger(logdir, read_only=args.test_only)
logger.log('args: %s'%str(args))
logger.log('model args: %s'%str(model_args))

# define model
# Look up the model class by name in the models package and construct it
# from the parsed model-specific arguments.
model = models.__dict__[args.model](**vars(model_args)).cuda()
# logger.log('full-model FLOPs: %d' % measure(model, torch.zeros(1, 3, 32, 32).cuda())[0])

# define datasets - 0: train, 1: val, 2: test
datasets = get_dataset(args.dataset, val_size=args.valsize)
dataloaders = []
for d in datasets:
Exemple #23
0
def main():
    """Train, validate and test a SimpleSpatialModel on spatial relations.

    Builds CPU-flagged dataloaders, trains for ``args.n_epochs`` epochs with
    per-epoch validation and checkpointing, tracks the best model by
    training accuracy, and finally evaluates on the test split.
    """
    args = parse_args()

    dataloader_train = create_dataloader(args.train_split, False, args)
    dataloader_valid = create_dataloader("valid", False, args)
    dataloader_test = create_dataloader("test", False, args)
    print("%d batches of training examples" % len(dataloader_train))
    print("%d batches of validation examples" % len(dataloader_valid))
    print("%d batches of testing examples" % len(dataloader_test))

    # 4 input channels, 9 output relation classes.
    model = SimpleSpatialModel(4, args.feature_dim, 9)
    print(model)
    criterion = nn.BCEWithLogitsLoss()
    # Model runs on CPU when CUDA is unavailable.
    if torch.cuda.is_available():
        model.cuda()
        criterion.cuda()

    optimizer = torch.optim.RMSprop(
        model.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.l2,
    )
    # Fixed-step decay for final (train+val) runs; plateau-based decay when
    # a validation loss is available to monitor.
    if args.train_split == "train_valid":
        scheduler = StepLR(optimizer, step_size=21, gamma=0.1)
    else:
        scheduler = ReduceLROnPlateau(optimizer, patience=5, verbose=True)
    best_acc = -1.0

    for epoch in range(args.n_epochs):
        print("epoch #%d" % epoch)

        print("training..")
        loss, acc = train(model, criterion, optimizer, dataloader_train, epoch, args)
        print("\n\ttraining loss = %.4f" % loss)
        print("\ttraining accuracy = %.3f" % acc)

        print("validating..")
        loss, accs = test("valid", model, criterion, dataloader_valid, epoch, args)
        print("\n\tvalidation loss = %.4f" % loss)
        print("\tvalidation accuracy = %.3f" % accs["overall"])
        for predi in accs:
            if predi != "overall":
                print("\t\t%s: %.3f" % (predi, accs[predi]))

        # Checkpoint every epoch; move to CPU first so the file loads on
        # hosts without CUDA.
        checkpoint_filename = os.path.join(
            args.log_dir, "checkpoints/model_%02d.pth" % epoch
        )
        model.cpu()
        torch.save(
            {
                "epoch": epoch + 1,
                "args": args,
                "state_dict": model.state_dict(),
                "accuracy": acc,
                "optimizer": optimizer.state_dict(),
            },
            checkpoint_filename,
        )
        if torch.cuda.is_available():
            model.cuda()

        # NOTE(review): best-model selection uses TRAINING accuracy `acc`,
        # not the validation accuracy in `accs` -- confirm intentional.
        if best_acc < acc:
            best_acc = acc
            shutil.copyfile(
                checkpoint_filename,
                os.path.join(args.log_dir, "checkpoints/model_best.pth"),
            )
            shutil.copyfile(
                os.path.join(args.log_dir, "predictions/pred_%02d.pickle" % epoch),
                os.path.join(args.log_dir, "predictions/pred_best.pickle"),
            )

        if args.train_split == "train_valid":
            scheduler.step()
        else:
            scheduler.step(loss)

    print("testing..")
    # NOTE(review): passes the final loop `epoch` (not None) to test(),
    # unlike the sibling training scripts -- confirm intended.
    _, accs = test("test", model, criterion, dataloader_test, epoch, args)
    print("\ttesting accuracies = %.3f" % accs["overall"])
    for predi in accs:
        if predi != "overall":
            print("\t\t%s: %.3f" % (predi, accs[predi]))
Exemple #24
0
        ])
    z_all = torch.from_numpy(np.array(z_all))

    model.n_query = n_query
    scores = model.set_forward(z_all, is_feature=True)
    pred = scores.data.cpu().numpy().argmax(axis=1)
    y = np.repeat(range(n_way), n_query)
    acc = np.mean(pred == y) * 100
    return acc


# --- main ---
if __name__ == '__main__':

    # parse argument
    params = parse_args('test')
    print('Testing! {} shots on {} dataset with {} epochs of {}({})'.format(
        params.n_shot, params.dataset, params.save_epoch, params.name,
        params.method))
    remove_featurefile = True

    print('\nStage 1: saving features')
    # dataset
    print('  build dataset')
    # Conv backbones use 84x84 inputs; ResNet backbones use 224x224.
    if 'Conv' in params.model:
        image_size = 84
    else:
        image_size = 224
    split = params.split
    # Episode file for the requested split of the evaluation dataset.
    loadfile = os.path.join(params.data_dir, params.dataset, split + '.json')
    datamgr = SimpleDataManager(image_size, batch_size=64)
Exemple #25
0
        if ((epoch + 1) % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir,
                                   '{:d}.tar'.format(epoch + 1))
            model.save(outfile, epoch)

    return


# --- main function ---
if __name__ == '__main__':

    # set numpy random seed
    np.random.seed(10)

    # parse argument
    params = parse_args('train')
    print('--- LFTNet training: {} ---\n'.format(params.name))
    print(params)

    # output and tensorboard dir
    # Per-experiment log and checkpoint directories keyed by params.name.
    params.tf_dir = '%s/log/%s' % (params.save_dir, params.name)
    params.checkpoint_dir = '%s/checkpoints/%s' % (params.save_dir,
                                                   params.name)
    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)

    # dataloader
    print('\n--- prepare dataloader ---')
    print('  train with multiple seen domains (unseen domain: {})'.format(
        params.testset))
    # All candidate domains; the held-out testset is excluded downstream.
    datasets = ['miniImagenet', 'cars', 'places', 'cub', 'plantae']
Exemple #26
0
def main():
    """Train, validate and test the (optionally depth-aware) DRNet variant.

    Opens a timestamped experiment log file, builds standard or
    depth-augmented dataloaders depending on ``args.custom_on``, trains for
    ``args.n_epochs`` epochs with periodic validation, per-epoch
    checkpointing and best-model tracking, then evaluates on the test split.
    """
    time1 = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')
    # NOTE(review): hard-coded absolute log path, and `fh` is only closed at
    # the very end -- it leaks if an exception interrupts training, and it is
    # never written to in this function. Consider a `with` block.
    fh = open('/home/lx/DRNet/experiment/spatialrel/rel' + time1 + '.txt',
              'w',
              encoding='utf-8')
    args = parse_args()
    # Depth-augmented dataloaders when the custom pipeline is enabled.
    if args.custom_on == 'on':
        dataloader_train = create_dataloader_depth(args.train_split, True,
                                                   args)
        dataloader_valid = create_dataloader_depth('valid', True, args)
        dataloader_test = create_dataloader_depth('test', True, args)
    else:
        dataloader_train = create_dataloader(args.train_split, True, args)
        dataloader_valid = create_dataloader('valid', True, args)
        dataloader_test = create_dataloader('test', True, args)
    print('%d batches of training examples' % len(dataloader_train))
    print('%d batches of validation examples' % len(dataloader_valid))
    print('%d batches of testing examples' % len(dataloader_test))

    phrase_encoder = RecurrentPhraseEncoder(300, 300)

    # Depth-aware model variant consumes the extra depth channel.
    if args.depth_on == 'on':
        model = DRNet_depth(phrase_encoder, args.feature_dim, args.num_layers,
                            args)
    else:
        model = DRNet(phrase_encoder, args.feature_dim, args.num_layers, args)

    model.cuda()

    # criterion = nn.BCEWithLogitsLoss()
    criterion = nn.CrossEntropyLoss()
    criterion.cuda()

    # Only optimize trainable parameters (frozen weights are skipped).
    optimizer = torch.optim.RMSprop(
        [p for p in model.parameters() if p.requires_grad],
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.l2)
    # Plateau-based LR decay when a held-out validation split exists;
    # otherwise a fixed step schedule.
    if args.train_split == 'train':
        scheduler = ReduceLROnPlateau(optimizer, patience=4, verbose=True)
    else:
        scheduler = StepLR(optimizer, step_size=args.patience, gamma=0.1)

    start_epoch = 0
    # Fixed: compare to None with `is not` (identity), not `!=`.
    if args.resume is not None:
        print(' => loading model checkpoint from %s..' % args.resume)
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model.cuda()
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']

    best_acc = -1.

    for epoch in range(start_epoch, start_epoch + args.n_epochs):
        print('epoch #%d' % epoch)

        print('training..')
        loss, acc = train(model, criterion, optimizer, dataloader_train, epoch,
                          args)
        print('\n\ttraining loss = %.4f' % loss)
        print('\ttraining accuracy = %.3f' % acc)
        checkpoint_filename = os.path.join(
            args.log_dir, 'checkpoints/model_%.3f_%02d.pth' % (acc, epoch))
        # Validate (and time inference) only every 5th epoch.
        if epoch % 5 == 0:
            print('validating..')
            torch.cuda.synchronize()
            start = time.time()
            loss, acc = test('valid', model, criterion, dataloader_valid,
                             epoch, args)
            torch.cuda.synchronize()
            end = time.time()
            # Average wall-clock inference time per input example.
            dtime = ((end - start) / len(dataloader_valid) / args.batchsize)
            print('\n\tvalidation loss = %.4f' % loss)
            print('\tvalidation accuracy = %.3f' % acc)
            print('\tvalidation time per input = %.3f' % dtime)

        # Move to CPU before serializing so the checkpoint loads anywhere.
        model.cpu()
        torch.save(
            {
                'epoch': epoch + 1,
                'args': args,
                'state_dict': model.state_dict(),
                'accuracy': acc,
                'optimizer': optimizer.state_dict(),
            }, checkpoint_filename)
        model.cuda()

        if args.train_split != 'train_valid' and best_acc < acc:
            best_acc = acc
            shutil.copyfile(
                checkpoint_filename,
                os.path.join(args.log_dir, 'checkpoints/model_best.pth'))
            shutil.copyfile(
                os.path.join(args.log_dir,
                             'predictions/pred_%02d.pickle' % epoch),
                os.path.join(args.log_dir, 'predictions/pred_best.pickle'))

        # Plateau scheduler needs the monitored loss; StepLR steps blindly.
        if args.train_split == 'train':
            scheduler.step(loss)
        else:
            scheduler.step()

    print('testing..')
    loss, acc = test('test', model, criterion, dataloader_test, None, args)
    print('\n\ttesting loss = %.4f' % loss)
    print('\ttesting accuracy = %.3f' % acc)
    fh.close()
Exemple #27
0
def answer(topic=None):
    """
    Main rendering function, it processes incoming weather queries.
    Depending on user agent it returns output in HTML or ANSI format.

    Incoming data:
        request.args
        request.headers
        request.remote_addr
        request.referrer
        request.query_string
    """

    user_agent = request.headers.get('User-Agent', '').lower()
    html_needed = _is_html_needed(user_agent)
    options = parse_args(request.args)

    # Ignore browser icon probes (touch icons, favicon) with an empty body.
    if topic in ['apple-touch-icon-precomposed.png', 'apple-touch-icon.png', 'apple-touch-icon-120x120-precomposed.png'] \
        or (topic is not None and any(topic.endswith('/'+x) for x in ['favicon.ico'])):
        return ''

    # ':last' replays the previous query of the cookie-identified client;
    # any other topic is recorded as that client's latest query.
    request_id = request.cookies.get('id')
    if topic is not None and topic.lstrip('/') == ':last':
        if request_id:
            topic = last_query(request_id)
        else:
            return "ERROR: you have to set id for your requests to use /:last\n"
    else:
        if request_id:
            save_query(request_id, topic)

    if request.method == 'POST':
        process_post_request(request, html_needed)
        if html_needed:
            return redirect("/")
        return "OK\n"

    # ?topic=... form submissions redirect to the canonical /<topic> URL.
    if 'topic' in request.args:
        return redirect("/%s" % request.args.get('topic'))

    if topic is None:
        topic = ":firstpage"

    # shell-x queries are proxied to the local shell-x service.
    if topic.startswith(':shell-x/'):
        return _proxy()
        #return requests.get('http://127.0.0.1:3000'+topic[8:]).text

    # Honor an explicit answer-language override from the request.
    lang = get_answer_language(request)
    if lang:
        options['lang'] = lang

    # Rate-limit only the expensive '+'-combined queries, per client IP.
    ip_address = get_request_ip(request)
    if '+' in topic:
        not_allowed = LIMITS.check_ip(ip_address)
        if not_allowed:
            return "429 %s\n" % not_allowed, 429

    # Script-producing topics are always served as plain ANSI text even for
    # browser user agents.
    html_is_needed = _is_html_needed(
        user_agent) and not is_result_a_script(topic)
    if html_is_needed:
        output_format = 'html'
    else:
        output_format = 'ansi'
    result, found = cheat_wrapper(topic,
                                  request_options=options,
                                  output_format=output_format)
    # Serve the static "malformed response" page when the backend signals
    # temporary failure and the client expects HTML.
    if 'Please come back in several hours' in result and html_is_needed:
        malformed_response = open(
            os.path.join(CONFIG["path.internal.malformed"])).read()
        return malformed_response

    log_query(ip_address, found, topic, user_agent)
    if html_is_needed:
        return result
    return Response(result, mimetype='text/plain')
def explain_gnnnet():
    """Generate LRP explanation heatmaps for a trained GnnNet few-shot model.

    Loads a GnnNet checkpoint trained on miniImagenet, runs one test episode
    through the feature backbone, the fc encoder and the GNN layer by layer,
    then propagates relevance (Layer-wise Relevance Propagation) from each
    class's query logits back down to the query images.  For every query
    image it saves the original image plus one LRP heatmap per explained
    class under ``<save_dir>/explanations/<name>/episode<idx>/<img-stem>/``.

    NOTE(review): all paths and hyper-parameters are hard-coded for one
    specific machine/run, and a CUDA device is required (``.cuda()`` calls).
    Only the first episode is processed (``break`` at the end of the loop).
    """
    params = options.parse_args('test')
    feature_model = backbone.model_dict['ResNet10']
    params.method = 'gnnnet'
    params.dataset = 'miniImagenet'  # name relationnet --testset miniImagenet
    params.name = 'gnn'
    params.testset = 'miniImagenet'
    params.data_dir = '/home/sunjiamei/work/fewshotlearning/dataset/'
    params.save_dir = '/home/sunjiamei/work/fewshotlearning/CrossDomainFewShot-master/output'

    # Conv backbones use 84x84 inputs, the deeper (ResNet) ones 224x224.
    if 'Conv' in params.model:
        image_size = 84
    else:
        image_size = 224
    split = params.split
    n_query = 1
    loadfile = os.path.join(params.data_dir, params.testset, split + '.json')
    few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)
    data_datamgr = SetDataManager(image_size,
                                  n_query=n_query,
                                  **few_shot_params)
    data_loader = data_datamgr.get_data_loader(loadfile, aug=False)

    # Build the model.  Only the 'gnnnet' branch is reachable here because
    # params.method is forced above; the other builders are kept so the
    # method can be switched easily.
    print('  build metric-based model')
    if params.method == 'protonet':
        model = ProtoNet(backbone.model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet(backbone.model_dict[params.model],
                            **few_shot_params)
    elif params.method == 'gnnnet':
        model = GnnNet(backbone.model_dict[params.model], **few_shot_params)
    elif params.method in ['relationnet', 'relationnet_softmax']:
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        else:
            feature_model = backbone.model_dict[params.model]
        loss_type = 'LRP'
        model = RelationNet(feature_model,
                            loss_type=loss_type,
                            **few_shot_params)
    else:
        raise ValueError('Unknown method')

    # Resolve and load the checkpoint (a specific epoch, or the best one).
    checkpoint_dir = '%s/checkpoints/%s' % (params.save_dir, params.name)
    if params.save_epoch != -1:
        modelfile = get_assigned_file(checkpoint_dir, params.save_epoch)
    else:
        modelfile = get_best_file(checkpoint_dir)
    if modelfile is not None:
        tmp = torch.load(modelfile)
        try:
            model.load_state_dict(tmp['state'])
            print('loaded pretrained model')
        except RuntimeError:
            # Architecture mismatch: load whatever matches, skip the rest.
            print('warning! RuntimeError when load_state_dict()!')
            model.load_state_dict(tmp['state'], strict=False)
        except KeyError:
            # Older checkpoints keep weights under 'model_state' and store
            # batch-norm running stats with an extra dimension.
            for k in tmp['model_state']:  ##### revise latter
                if 'running' in k:
                    tmp['model_state'][k] = tmp['model_state'][k].squeeze()
            model.load_state_dict(tmp['model_state'], strict=False)

    model = model.cuda()
    model.eval()
    model.n_query = n_query
    lrp_preset = lrp_presets.SequentialPresetA()
    feature_model = model.feature
    fc_encoder = model.fc
    gnn_net = model.gnn
    # Attach LRP hooks to the fc encoder now; the backbone gets its hooks
    # added later, just before its relevance is computed.
    lrp_wrapper.add_lrp(fc_encoder, lrp_preset)

    with open(
            '/home/sunjiamei/work/fewshotlearning/dataset/miniImagenet/class_to_readablelabel.json',
            'r') as f:
        class_to_readable = json.load(f)
    explanation_save_dir = os.path.join(params.save_dir, 'explanations',
                                        params.name)
    if not os.path.isdir(explanation_save_dir):
        os.makedirs(explanation_save_dir)
    for batch_idx, (x, y, p) in enumerate(data_loader):
        print(p)
        label_to_readableclass, query_img_path, query_gt_class = LRPutil.get_class_label(
            p, class_to_readable, model.n_query)
        x = x.cuda()
        # One-hot labels for the support images; the query slot of each
        # class gets an all-zero label vector.
        support_label = torch.from_numpy(
            np.repeat(range(model.n_way),
                      model.n_support)).unsqueeze(1)  #torch.Size([25, 1])
        support_label = torch.zeros(model.n_way * model.n_support,
                                    model.n_way).scatter(1, support_label,
                                                         1).view(
                                                             model.n_way,
                                                             model.n_support,
                                                             model.n_way)
        support_label = torch.cat(
            [support_label,
             torch.zeros(model.n_way, 1, model.n_way)], dim=1)
        support_label = support_label.view(1, -1, model.n_way)
        support_label = support_label.cuda()  #torch.Size([1, 30, 5])
        x = x.contiguous()
        x = x.view(-1, *x.size()[2:])  #torch.Size([30, 3, 224, 224])
        x_feature = feature_model(x)  #torch.Size([30, 512])
        x_fc_encoded = fc_encoder(x_feature)  #torch.Size([30, 128])
        z = x_fc_encoded.view(model.n_way, -1,
                              x_fc_encoded.size(1))  # (5,6,128)
        gnn_feature = [
            torch.cat([
                z[:, :model.n_support],
                z[:, model.n_support + i:model.n_support + i + 1]
            ],
                      dim=1).view(1, -1, z.size(2))
            for i in range(model.n_query)
        ]  # model.n_query is the number of query images for each class
        # gnn_feature is grouped into n_query groups. each group contains the
        # support image features concatenated with one query image's features.
        gnn_nodes = torch.cat(
            [torch.cat([z, support_label], dim=2) for z in gnn_feature], dim=0
        )  # features concatenated with the one-hot label; for the unknown
        # (query) image the one-hot label is all zero -> node dim 128+5=133

        #  Perform gnn_net step by step so every intermediate is available
        #  for the relevance backward pass.
        #  The first iteration:
        print('x', gnn_nodes.shape)
        W_init = torch.eye(
            gnn_nodes.size(1), device=gnn_nodes.device
        ).unsqueeze(0).repeat(gnn_nodes.size(0), 1, 1).unsqueeze(
            3
        )  # (n_querry, n_way*(num_support + 1), n_way*(num_support + 1), 1)

        W1 = gnn_net._modules['layer_w{}'.format(0)](
            gnn_nodes, W_init
        )  # (n_querry, n_way*(num_support + 1), n_way*(num_support + 1), 2)
        x_new1 = F.leaky_relu(gnn_net._modules['layer_l{}'.format(0)](
            [W1, gnn_nodes])[1])  # (num_querry, num_support + 1, num_outputs)
        gnn_nodes_1 = torch.cat([gnn_nodes, x_new1],
                                2)  # (concat more features)
        # gnn_nodes_1: 133 + 48 = 181 features per node

        #  The second iteration:
        W2 = gnn_net._modules['layer_w{}'.format(1)](
            gnn_nodes_1, W_init
        )  # (n_querry, n_way*(num_support + 1), n_way*(num_support + 1), 2)
        x_new2 = F.leaky_relu(gnn_net._modules['layer_l{}'.format(1)](
            [W2,
             gnn_nodes_1])[1])  # (num_querry, num_support + 1, num_outputs)
        gnn_nodes_2 = torch.cat([gnn_nodes_1, x_new2],
                                2)  # (concat more features) -> 181+48=229

        Wl = gnn_net.w_comp_last(gnn_nodes_2, W_init)
        scores = gnn_net.layer_last(
            [Wl, gnn_nodes_2])[1]  # (num_querry, num_support + 1, num_way)
        print(scores.shape)

        scores_sf = torch.softmax(scores, dim=-1)

        # Convert class probabilities back to logits (scaled log-odds) so
        # relevance starts from a signed score per class.
        gnn_logits = torch.log(LRPutil.LOGIT_BETA * scores_sf /
                               (1 - scores_sf))
        gnn_logits = gnn_logits.view(-1, model.n_way,
                                     model.n_support + n_query, model.n_way)
        query_scores = scores.view(
            model.n_query, model.n_way, model.n_support + 1,
            model.n_way)[:, :,
                         -1].permute(1, 0,
                                     2).contiguous().view(-1, model.n_way)
        preds = query_scores.data.cpu().numpy().argmax(axis=-1)
        for k in range(model.n_way):
            # Keep only class k's query logits so relevance is propagated
            # for one explained class at a time.
            # BUG FIX: was torch.zeros(5), hard-coding the way count;
            # use model.n_way (identical for the default 5-way setting).
            mask = torch.zeros(model.n_way).cuda()
            mask[k] = 1
            gnn_logits_cls = gnn_logits.clone()
            gnn_logits_cls[:, :, -1] = gnn_logits_cls[:, :, -1] * mask
            gnn_logits_cls = gnn_logits_cls.view(-1, model.n_way)
            # Walk the three GNN layers backwards; at each step peel off the
            # slice of node features that layer appended (offsets 181/133,
            # width 48) and keep propagating.
            relevance_gnn_nodes_2 = explain_Gconv(gnn_logits_cls,
                                                  gnn_net.layer_last, Wl,
                                                  gnn_nodes_2)
            relevance_x_new2 = relevance_gnn_nodes_2.narrow(-1, 181, 48)
            relevance_gnn_nodes_1 = explain_Gconv(
                relevance_x_new2, gnn_net._modules['layer_l{}'.format(1)], W2,
                gnn_nodes_1)
            relevance_x_new1 = relevance_gnn_nodes_1.narrow(-1, 133, 48)
            relevance_gnn_nodes = explain_Gconv(
                relevance_x_new1, gnn_net._modules['layer_l{}'.format(0)], W1,
                gnn_nodes)
            # Sum the relevance each stage assigned to the original 128-dim
            # encoded features.
            relevance_gnn_features = relevance_gnn_nodes.narrow(-1, 0, 128)
            print(relevance_gnn_features.shape)
            relevance_gnn_features += relevance_gnn_nodes_1.narrow(-1, 0, 128)
            relevance_gnn_features += relevance_gnn_nodes_2.narrow(
                -1, 0, 128)  #[2, 30, 128]
            relevance_gnn_features = relevance_gnn_features.view(
                n_query, model.n_way, model.n_support + 1, 128)
            # Collect only the query-slot relevance from every query group.
            for i in range(n_query):
                query_i = relevance_gnn_features[i][:, model.
                                                    n_support:model.n_support +
                                                    1]
                if i == 0:
                    relevance_z = query_i
                else:
                    relevance_z = torch.cat((relevance_z, query_i), 1)
            relevance_z = relevance_z.view(-1, 128)
            query_feature = x_feature.view(model.n_way, -1,
                                           512)[:, model.n_support:]
            query_feature = query_feature.contiguous()
            query_feature = query_feature.view(n_query * model.n_way, 512)
            # Relevance through the fc encoder, then through the backbone,
            # down to pixel space.
            relevance_query_features = fc_encoder.compute_lrp(
                query_feature, target=relevance_z)
            query_images = x.view(model.n_way, -1,
                                  *x.size()[1:])[:, model.n_support:]
            query_images = query_images.contiguous()

            query_images = query_images.view(-1, *x.size()[1:]).detach()
            lrp_wrapper.add_lrp(feature_model, lrp_preset)
            relevance_query_images = feature_model.compute_lrp(
                query_images, target=relevance_query_features)
            print(relevance_query_images.shape)

            for j in range(n_query * model.n_way):
                predict_class = label_to_readableclass[preds[j]]
                true_class = query_gt_class[int(j % model.n_way)][int(
                    j // model.n_way)]
                explain_class = label_to_readableclass[k]
                img_name = query_img_path[int(j % model.n_way)][int(
                    j // model.n_way)].split('/')[-1]
                # BUG FIX: the original used img_name.strip('.jpg'), which
                # strips any of the characters '.', 'j', 'p', 'g' from BOTH
                # ends of the name (e.g. 'g123.jpg' -> '123'); splitext
                # drops exactly the extension.
                img_stem = os.path.splitext(img_name)[0]
                save_path = os.path.join(explanation_save_dir,
                                         'episode' + str(batch_idx),
                                         img_stem)
                if not os.path.isdir(save_path):
                    os.makedirs(save_path)
                # Save the original query image once per directory.
                original_path = os.path.join(
                    save_path, true_class + '_' + predict_class + img_name)
                if not os.path.exists(original_path):
                    original_img = Image.fromarray(
                        np.uint8(
                            project(query_images[j].permute(
                                1, 2, 0).detach().cpu().numpy())))
                    original_img.save(original_path)

                img_relevance = relevance_query_images.narrow(0, j, 1)
                print(predict_class, true_class, explain_class)
                hm = img_relevance.permute(0, 2, 3, 1).cpu().detach().numpy()
                hm = LRPutil.gamma(hm)
                hm = LRPutil.heatmap(hm)[0]
                hm = project(hm)
                hp_img = Image.fromarray(np.uint8(hm))
                hp_img.save(
                    os.path.join(
                        save_path,
                        true_class + '_' + explain_class + '_lrp_hm.jpg'))

        break
Example #29
0
                'segmentation_0_planenet', 'segmentation_0_warping'
            ]
            writeHTML(args.test_dir,
                      info_list,
                      numImages=30,
                      convertToImage=False,
                      filename='comparison_segmentation')
            pass
        exit(1)
        pass

    evaluate(args)


if __name__ == '__main__':
    args = parse_args()

    # The output key defaults to 'evaluate' when no dataset is given.
    args.keyname = args.dataset if args.dataset != '' else 'evaluate'
    args.test_dir = 'test/' + args.keyname

    # Requesting a specific testing index implies debug mode.
    if args.testingIndex >= 0:
        args.debug = True
    if args.debug:
        args.test_dir += '_debug'
        args.printInfo = True
Example #30
0
        pass
    instance_info = [(mask[:num_ori_coords], label, confidence)
                     for mask, label, confidence in instance_info]

    instance_pred = instance_pred[:num_ori_coords]
    semantic_pred = semantic_pred[:num_ori_coords]
    writeInstances(test_dir,
                   scene_id,
                   instance_pred,
                   semantic_pred,
                   instance_info=instance_info)
    return


if __name__ == '__main__':
    options = parse_args()
    options.keyname = 'instance'
    #args.keyname += '_' + args.dataset

    if options.suffix != '':
        options.keyname += '_' + options.suffix
        pass
    if options.numScales != 1:
        options.keyname += '_' + str(options.numScales)
        pass

    options.checkpoint_dir = 'checkpoint/' + options.keyname

    filenames = []
    split = options.split