# Example 1
def load_model(directory):
    """Rebuild the model described by the config stored in *directory*,
    restore its weights, put it in training mode and move it to the
    best available device (CUDA if present, else CPU)."""
    # Pick the compute device up front so the loaded weights end up there.
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    model = create_model(parse_config(directory)['model'])
    model.load(directory)
    # NOTE(review): train() on a freshly loaded model — confirm this is
    # intended (e.g. for fine-tuning) rather than eval() for inference.
    model.train()
    model.to(device)
    return model
def dataloader_debug():
    """Visual debugging helper: iterate the training dataloader once and
    dump de-normalised images, with their ground-truth masks burned in,
    as JPEGs under a local output directory.

    Relies on module-level names: os, anyconfig, torch, np, cv2, tqdm,
    dataset, init_args, parse_config, get_trainloader.
    """
    # Pin the process to one physical GPU before any CUDA initialisation.
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'

    args = init_args()
    assert os.path.exists(args.config_file)
    config = anyconfig.load(open(args.config_file, 'rb'))
    # Expand the 'base' inheritance section into a flat config if present.
    if 'base' in config:
        config = parse_config(config)
    print('===config:', config)
    print('==torch.cuda.device_count():', torch.cuda.device_count())
    if torch.cuda.device_count() > 1:
        # Multi-GPU: set the local device and join the NCCL process group.
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl",
            init_method="env://",
            world_size=torch.cuda.device_count(),
            rank=args.local_rank)
        config['distributed'] = True
    else:
        config['distributed'] = False
    config['local_rank'] = args.local_rank

    train_loader = get_trainloader(dataset.ICDAR2015Dataset, config)

    output_path = './查看图片_dataloader'  # path kept verbatim ("inspect images" dump dir)
    # makedirs(exist_ok=True) replaces the original's racy
    # os.path.exists + os.mkdir check-then-create pair.
    os.makedirs(output_path, exist_ok=True)
    epochs = 1
    for epoch in range(epochs):
        for i, data_info in enumerate(tqdm(train_loader)):
            print('===data_info:', data_info.keys())
            batch_img = data_info['img']
            shrink_map = data_info['shrink_map']
            batch_gt = data_info['gt']
            print('== batch_img.shape', batch_img.shape)
            print('===shrink_map.shape', shrink_map.shape)

            for j in range(batch_img.shape[0]):
                # CHW tensor -> HWC ndarray for image operations.
                img = batch_img[j].numpy().transpose(1, 2, 0)
                gt = batch_gt[j].numpy() * 255.
                gt = np.expand_dims(gt, axis=-1)
                # Undo ImageNet mean/std normalisation back to 0-255.
                img = (img * np.array([0.229, 0.224, 0.225]) +
                       np.array([0.485, 0.456, 0.406])) * 255.
                # Burn the GT mask into the image; clip to valid pixel range.
                img = np.clip(gt + img, 0, 255)
                # Channel reversal: RGB -> BGR, as cv2.imwrite expects BGR.
                cv2.imwrite(
                    os.path.join(output_path,
                                 str(i) + '_' + str(j) + '.jpg'),
                    img[..., ::-1])
# Example 3
def main_entrance():
    """End-to-end training entry point: parse the config, build the model,
    loss, post-processing and dataloaders, optionally wrap the model in
    DistributedDataParallel, then run the training loop and plot LRs."""
    # Pin the process to one physical GPU before any CUDA initialisation.
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    args = init_args()
    config = anyconfig.load(open(args.config_file, 'rb'))
    # print('===config:', config)
    # Expand the 'base' inheritance section into a flat config if present.
    if 'base' in config:
        config = parse_config(config)
    print('===config:', config)
    # Multi-GPU: set the local device and join the NCCL process group.
    if torch.cuda.device_count() > 1:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://", world_size=torch.cuda.device_count(),
                                             rank=args.local_rank)
        config['distributed'] = True
    else:
        config['distributed'] = False
    config['local_rank'] = args.local_rank
    logging.info(config['dataset']['train'])
    model = build_model(config['arch']['type'], **config['arch'])

    # 'type' is popped so only the remaining keys reach the loss constructor.
    criterion = build_loss(config['loss'].pop('type'), **config['loss']).cuda()
    post_process = get_post_processing(config['post_processing'])
    train_loader = get_trainloader(dataset.ICDAR2015Dataset, config)
    eval_loader = get_evalloader(dataset.ICDAR2015Dataset, config)

    model = model.cuda()
    if config['distributed']:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                    output_device=args.local_rank, broadcast_buffers=False,
                                                    find_unused_parameters=True)
    checkpoint_path = config['train']['resume_checkpoint']
    output_path = config['train']['output_path']
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.00005)
    # load_weights(model, optimizer, config['distributed'], checkpoint_path='/red_detection/DBNet/code_pretrain_model/model_latest_express_code_7_13.pth')
    # load_weights(model, optimizer, config['distributed'], checkpoint_path=checkpoint_path)

    epochs = config['train']['epochs']
    # NOTE(review): warmup_iters is unused while the scheduler below stays
    # commented out — either wire it up or remove it.
    warmup_iters = config['lr_scheduler']['args']['warmup_epoch']*len(train_loader)
    # scheduler = WarmupPolyLR(optimizer, max_iters=epochs * len(train_loader),
    #                          warmup_iters=warmup_iters, **config['lr_scheduler']['args'])

    train(model, optimizer, epochs, criterion, train_loader, config, post_process, eval_loader,output_path)

    from matplotlib import pyplot as plt

    # NOTE(review): lr_list is not defined in this function — presumably a
    # module-level list populated elsewhere (e.g. inside train()); this line
    # raises NameError if it is missing. TODO confirm.
    plt.plot(lr_list)
    plt.savefig('./show_lr_word_industry.png')
# Example 4
        if done:
            print()
            counter = 0
            reward_sum = 0
            save_info("submit_logs/second/log_{}.json".format(episodes),
                      env.get_episode_info())
            episodes += 1
            (observation, _) = env.reset(False)
            if observation is None:
                break


if __name__ == '__main__':
    # Directory whose stored config drives this submission run.
    directory = '/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_0/episode_30_reward_9841.04/'

    # Nested list: each inner list is one ensemble of model checkpoints to
    # submit together; entries were hand-toggled via commenting.
    directories = [[
        #'/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_0/episode_48_reward_9840.57/',
        #'/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_0/episode_30_reward_9841.04/',
        #'/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_0/episode_66_reward_9851.11//',
        '/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_0/episode_100_reward_9854.58/',
        '/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_1/episode_73_reward_9867.67/',
        '/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_0/episode_38_reward_9879.41//',
        '/data/svidchenko/afterlearning/moar_features_1/sgdr_1/saved_models/exploiting_virtual_thread_3/episode_9_reward_9881.80//'
    ]]

    config = parse_config(directory)
    # Repeat each chosen action for 3 environment steps.
    config['environment']['wrapper']['repeat_actions'] = 3

    submit(config, directories, repeats=1)
# Example 5
from training.managers import TrainManager
from utils.util import parse_config

# Config file names read by parse_config. AFTERLEARN_CONFIG_FILENAME is not
# referenced in this chunk — presumably consumed elsewhere; verify before removing.
CONFIG_FILENAME = 'config.json'
AFTERLEARN_CONFIG_FILENAME = 'afterlearn_config.json'

if __name__ == '__main__':
    # Load the training config and hand control to the training manager.
    config = parse_config(config_name=CONFIG_FILENAME)
    manager = TrainManager(config)
    manager.manage()
# Example 6
    #
    # directories = [
    #     # '/home/ivan/data/prosthetics/exps/6/saved_models/exploiting_common_thread_0/episode_676_reward_2406.04/',
    #     # '/home/ivan/data/prosthetics/exps/6/saved_models/exploiting_common_thread_0/episode_684_reward_2407.29/',
    #     # '/home/ivan/data/prosthetics/exps/6/saved_models/exploiting_common_thread_0/episode_696_reward_2414.30/'
    # ]

    directory = './../../logdir/saved_models/moar_features_1/'
    # directory = './saved_models/exploiting_thread_1/episode_516_reward_9465.81/'

    directories = [
        # './../../logdir/saved_models/coors_rotation/second',
        # './../../logdir/saved_models/coors_rotation/third'
    ]

    config = parse_config()
    config["environment"]["wrapper"]["target_transformer_config"]["noise"] = 0.
    config["environment"]["wrapper"]["target_transformer_type"] = "normal"
    config["environment"]["wrapper"]["repeat_actions"] = 3
    config["environment"]["core"]["max_steps"] = 1000
    config["environment"]["wrapper"]["reward_aggregations"] = [{
        "class": "TransformAndBound",
        "config": {
            "move": -19.0,
            "scale": 2.0,
            "bound": 1.0
        }
    }]
    rewards = []

    for seed_plus in range(40):
# Example 7
def multitest(model_sets, log, averaging_models=(2, 3, 4), seeds_per_thread=4, repeat_set=(1, 2), model_workers=10,
              average_weights=False):
    """Grid-evaluate every combination of *model_sets* x *repeat_set* and
    save the aggregated statistics to *log*.

    Parameters
    ----------
    model_sets : iterable
        Groups of model directories; expanded via all_combinations().
    log : str
        Destination handed to save_info() with the accumulated results.
    averaging_models : tuple
        NOTE(review): unused in this body — kept for interface
        compatibility; TODO confirm whether it should feed evaluate().
    seeds_per_thread, repeats (from repeat_set), model_workers,
    average_weights
        Forwarded to evaluate().
    """
    total_results = []
    model_sets = all_combinations(model_sets)
    # One shuffle already yields a uniform random permutation; the original
    # called shuffle three consecutive times, which was redundant.
    random.shuffle(model_sets)
    try:
        for directories in model_sets:
            for repeats in repeat_set:
                # First directory supplies the shared evaluation config.
                directory = directories[0]
                dirs = list(reversed(directories))
                config = parse_config(directory)

                rewards, modified_rewards, step_counts, infos, rws = evaluate(
                    config, dirs,
                    seeds_per_thread=seeds_per_thread,
                    repeats=repeats,
                    model_workers=model_workers,
                    average_weights=average_weights)

                # One record per evaluated episode.
                logging_list = [{
                    "reward": r,
                    "modified_reward": mr,
                    "step_count": sc,
                    "info": inf
                } for r, mr, sc, inf in zip(rewards, modified_rewards, step_counts, infos)]

                total_results.append({
                    "models": dirs,
                    "repeats": repeats,
                    "result": logging_list,
                    "rewards": [mean_confidence_interval(rewards, confidence=0.95),
                                mean_confidence_interval(rewards, confidence=0.99),
                                np.quantile(rewards, [0.05, 0.1, 0.25, 0.5])],
                    "rewards_without_falling": [mean_confidence_interval(rws, confidence=0.95),
                                                mean_confidence_interval(rws, confidence=0.99),
                                                np.quantile(rws, [0.05, 0.1, 0.25, 0.5])],
                    # Fraction of episodes that ended before each step limit.
                    "falling_rate": {
                        str(limit): len([st for st in step_counts if st < limit]) / len(step_counts)
                        for limit in (300, 600, 900, 1000)
                    }
                })

                # Blank lines visually separate runs in console output.
                for i in range(10):
                    print()
                print_model(total_results[-1])
                print('='*20)
                print()
                print()

                # Running top-5 leaderboard, ranked by the lower confidence
                # bound of rewards-without-falling.
                for res in sorted(total_results, key=lambda res: res["rewards_without_falling"][0][0], reverse=True)[
                           :min(len(total_results), 5)]:
                    print_model(res)

                for i in range(10):
                    print()
    except Exception as e:
        # Broad catch keeps a long sweep alive after one bad model set; the
        # partial results are still reported and saved below. (The original
        # also had a dead `pass` after this print — removed.)
        print("Exception", e)
    finally:
        for i in range(10):
            print()
        print("Results:")
        for res in total_results:
            print_model(res)

        save_info(log, total_results)
# Example 8
# Best accuracy seen so far; updated during training elsewhere.
# (A top-level `global` statement is a no-op but kept for compatibility.)
global maxAcc
maxAcc = 0

# Report the visible GPU topology when CUDA was requested.
if args.cuda:  # idiomatic truthiness instead of '== True' (PEP 8)
    print("GPU in use", torch.cuda.is_available(), torch.cuda.current_device(),
          torch.cuda.device_count(),
          torch.cuda.get_device_name(torch.cuda.current_device()))
# DataLoader worker settings only matter on GPU runs.
kwargs = {'num_workers': 4, 'pin_memory': False} if args.cuda else {}

# Fall back to a default run name when none was given on the CLI.
if args.name is None:  # 'is None' instead of '== None' (PEP 8)
    name = 'FPGA4HEPmodel'
else:
    name = args.name

yamlConfig = parse_config('./yaml_IP_OP_config.yml')
print("Reading dataset...")
X_train_val, X_test, y_train_val, y_test, labels, train_loader, test_loader, input_shape, output_shape = get_features(
    yamlConfig, args.batch_size, args.test_batch_size)

# Tensor type chooses GPU vs CPU storage for manual tensor allocations.
dtype = torch.cuda.FloatTensor if args.cuda else torch.FloatTensor

model = LFC(num_classes=5, weight_bit_width=8, act_bit_width=8, in_bit_width=8)
if args.cuda:
    model.cuda()
print(model)
## Optimizers
optimizer = optim.SGD(filter(lambda x: x.requires_grad, model.parameters()),
                      lr=args.lr,
                      momentum=args.momentum,