Example #1
def main():
    args = parser.parse_args()

    if not args.checkpoint:
        args.pretrained = True

    # create model
    model = create_model(
        args.model,
        num_classes=args.num_classes,
        in_chans=3,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint)

    model.eval()

    x = torch.randn((1, 3, args.img_size or 224, args.img_size or 224), requires_grad=True)

    torch_out = torch.onnx._export(model, x, args.output, export_params=True)

    onnx_model = onnx.load(args.output)

    caffe2_backend = onnx_caffe2.prepare(onnx_model)

    B = {onnx_model.graph.input[0].name: x.data.numpy()}

    c2_out = caffe2_backend.run(B)[0]

    np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5)
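
The listings in this collection omit their imports and argument parser. Below is a minimal sketch of what Example #1 appears to assume; the module origins and parser defaults are inferred from the attributes the code reads, not taken from the source.

# Hedged sketch of the missing preamble (assumptions, not part of the original listing).
import argparse

import numpy as np
import onnx
import torch

# onnx_caffe2 is assumed to be the Caffe2 ONNX backend, e.g. the module bundled with
# Caffe2-enabled PyTorch builds; the source does not show this import.
import caffe2.python.onnx.backend as onnx_caffe2

# create_model is assumed to come from the image-model library these scripts target
# (e.g. timm); again, the import is not shown in the source.
from timm import create_model

parser = argparse.ArgumentParser(description='Export a model to ONNX')
parser.add_argument('--model', default='dpn92', help='model architecture name')
parser.add_argument('--num-classes', type=int, default=1000, help='number of output classes')
parser.add_argument('--checkpoint', default='', help='path to a checkpoint to load')
parser.add_argument('--pretrained', action='store_true', help='use pretrained weights')
parser.add_argument('--img-size', type=int, default=None, help='input image size')
parser.add_argument('--output', default='model.onnx', help='output ONNX file')
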
Example #2
def _model_and_score_and_evaluate(active_fragments: str, test_fragments: str, test_activity: str,
                                  num: int, output_directory: str, maximal_num: int):
    inputoutput_utils.create_parent_directory(output_directory + "/scorefiles/0")
    inputoutput_utils.create_parent_directory(output_directory + "/activities/0")
    with open(output_directory + "/configurationfiles/configuration" + str(maximal_num) + "_" + str(num) + ".json", "r",
              encoding="utf-8") as input_file:
        for new_line in input_file:
            line = json.loads(new_line)
            if os.path.isfile(output_directory + "/evaluations/" + line["evaluation"]):
                continue 
            
            new_model = model_factory.create_model(line["model_name"])
            model = new_model.create_model(active_fragments, "", "", "",
                                           line)
            new_model.score_model(model, test_fragments, "",
                                  output_directory + "/scorefiles/score" + line["evaluation"])

            # run add_activity
            activity = add_activity.read_activity(test_activity)
            add_activity.add_activity_and_write_to_json(output_directory + "/scorefiles/score" + line["evaluation"],
                                                        activity,
                                                        output_directory + "/activities/activity" + line["evaluation"])

            # run compute_evaluation
            score_act = compute_evaluation.read_file_with_score_and_activity(output_directory + "/activities/activity"
                                                                            + line["evaluation"])
            activity = compute_evaluation.sort_activity(score_act)
            compute_evaluation.evaluation(activity, output_directory + "/evaluations/" + line["evaluation"])
def main():
    args = parser.parse_args()
    #args.model ='dpn92'
    #args.checkpoint_path ='/root/share/data/models/reference/imagenet/dualpathnet/DPN-92_5k_to_1k'
    args.model = 'dpn98'
    args.checkpoint_path = '/root/share/data/models/reference/imagenet/dualpathnet/DPN-98_1k'

    if 'dpn' not in args.model:
        print('Error: Can only convert DPN models.')
        exit(1)
    if not has_mxnet:
        print('Error: Cannot import MXNet module. Please install.')
        exit(1)

    model = model_factory.create_model(args.model,
                                       num_classes=1000,
                                       pretrained=False)

    model_prefix = args.model
    if model_prefix in ['dpn107', 'dpn68b', 'dpn92']:
        model_prefix += '-extra'
    checkpoint_base = os.path.join(args.checkpoint_path, model_prefix)
    convert_from_mxnet(model, checkpoint_base)

    output_checkpoint = os.path.join(args.checkpoint_path,
                                     model_prefix + '.pth')
    torch.save(model.state_dict(), output_checkpoint)
Example #4
def _main():
    configuration = _read_configuration()
    with open(configuration["model"], "r", encoding="utf-8") as input_stream:
        model_content = json.load(input_stream)

    model = model_factory.create_model(
        model_content["configuration"]["model_name"])
    model.score_model(model_content, configuration["fragments"],
                      configuration["descriptors"], configuration["output"])
Example #5
def load_trained_model(self, train_data):
    p = join(self.logdir, 'trained_model*')
    files = glob.glob(p)
    best_trained_model_path = max(files, key=getctime)
    trained_model = create_model(train_data)
    trained_model.load_state_dict(
        torch.load(best_trained_model_path, map_location=FLAGS.device))
    trained_model.to(FLAGS.device)
    return trained_model
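
The training loop in Example #7 saves checkpoints through a matching saver.save_trained_model(model, epoch + 1), which these listings do not show; any implementation only needs to write files that the 'trained_model*' glob above can find. A hedged sketch (the file-name pattern and .pt suffix are assumptions):

# Hypothetical counterpart (not from the source): write a state_dict under logdir so
# that load_trained_model's 'trained_model*' glob picks up the most recent checkpoint.
def save_trained_model(self, model, epoch):
    path = join(self.logdir, 'trained_model_epoch_{}.pt'.format(epoch))
    torch.save(model.state_dict(), path)
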
def _main():
    configuration = _read_configuration()
    with open(configuration["configuration"], "r", encoding="utf-8") as input_stream:
        model_configuration = json.load(input_stream)
    model_name = model_configuration["model_name"]

    new_model = model_factory.create_model(model_name)
    model = new_model.create_model(configuration["active_fragments"], configuration["inactive_fragments"],
                                   configuration["active_descriptors"], configuration["inactive_descriptors"],
                                   model_configuration)

    inputoutput_utils.create_parent_directory(configuration["output"])
    new_model.save_to_json_file(configuration["output"], model)
Example #7
def train(train_data, val_data, saver):
    train_data.init_node_feats(FLAGS.init_type, FLAGS.device)
    val_data.init_node_feats(FLAGS.init_type, FLAGS.device)
    model = create_model(train_data)
    model = model.to(FLAGS.device)
    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print("Number params: ", pytorch_total_params)
    moving_avg = MovingAverage(FLAGS.validation_window_size,
                               FLAGS.validation_metric != 'loss')
    pyg_graph = train_data.get_pyg_graph(FLAGS.device)
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=FLAGS.lr,
    )

    for epoch in range(FLAGS.num_epochs):
        t = time.time()
        model.train()
        model.zero_grad()
        loss, preds_train = model(pyg_graph, train_data)
        loss.backward()
        optimizer.step()
        loss = loss.item()
        if COMET_EXPERIMENT:
            COMET_EXPERIMENT.log_metric("loss", loss, epoch + 1)
        with torch.no_grad():
            val_loss, preds_val = model(pyg_graph, val_data)
            val_loss = val_loss.item()
            eval_res_val = eval(preds_val, val_data)
            print("Epoch: {:04d}, Train Loss: {:.5f}, Time: {:.5f}".format(
                epoch, loss,
                time.time() - t))
            print("Val Loss: {:.5f}".format(val_loss))
            print("Val Results: ...")
            pprint(eval_res_val)
            eval_res_val["loss"] = val_loss
            if COMET_EXPERIMENT:
                COMET_EXPERIMENT.log_metrics(eval_res_val,
                                             prefix="validation",
                                             step=epoch + 1)

            if len(moving_avg.results) == 0 or moving_avg.best_result(
                    eval_res_val[FLAGS.validation_metric]):
                saver.save_trained_model(model, epoch + 1)
            moving_avg.add_to_moving_avg(eval_res_val[FLAGS.validation_metric])
            if moving_avg.stop():
                break
    best_model = saver.load_trained_model(train_data)
    return best_model, model
Example #8
def main():
    args = parser.parse_args()

    if not args.checkpoint:
        args.pretrained = True
    else:
        args.pretrained = False

    # create model
    print("==> Creating PyTorch {} model".format(args.model))
    model = create_model(args.model,
                         num_classes=args.num_classes,
                         in_chans=3,
                         pretrained=args.pretrained,
                         checkpoint_path=args.checkpoint)

    model.eval()

    x = torch.randn((1, 3, args.img_size or 224, args.img_size or 224),
                    requires_grad=True)
    model(x)  # run model once before export trace

    print("==> Exporting model to ONNX format at '{}'".format(args.output))
    input_names = ["input0"]
    output_names = ["output0"]
    torch_out = torch.onnx._export(model,
                                   x,
                                   args.output,
                                   export_params=True,
                                   verbose=False,
                                   input_names=input_names,
                                   output_names=output_names)

    print("==> Loading and checking exported model from '{}'".format(
        args.output))
    onnx_model = onnx.load(args.output)
    onnx.checker.check_model(onnx_model)  # assuming throw on error
    print("==> Passed")

    print("==> Loading model into Caffe2 backend and comparing forward pass.".
          format(args.output))
    caffe2_backend = onnx_caffe2.prepare(onnx_model)
    B = {onnx_model.graph.input[0].name: x.data.numpy()}
    c2_out = caffe2_backend.run(B)[0]
    np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5)
    print("==> Passed")
def main():
    args = parser.parse_args()
    if 'dpn' not in args.model:
        print('Error: Can only convert DPN models.')
        exit(1)

    import model_factory

    model = model_factory.create_model(args.model,
                                       num_classes=1000,
                                       pretrained=False)

    model_prefix = args.model
    if model_prefix in ['dpn107', 'dpn68b', 'dpn92']:
        model_prefix += '-extra'
    checkpoint_base = os.path.join(args.checkpoint_path, model_prefix)
    convert_from_mxnet(model, checkpoint_base)

    output_checkpoint = os.path.join(args.checkpoint_path,
                                     model_prefix + '.pth')
    torch.save(model.state_dict(), output_checkpoint)
def _main():
    # run extract_fragments
    configuration = _read_configuration()
    
    with open(configuration["model_configuration"], "r", encoding="utf-8") as input_stream:
        model_configuration = json.load(input_stream)
    try:
        new_model = model_factory.create_model(model_configuration["model_name"])
    except Exception:
        print("Model does not exist!")
        exit(1)
        
    if "kekule" not in model_configuration:
        model_configuration["kekule"] = False
    else:
        model_configuration["kekule"] = bool(model_configuration["kekule"])
    if "isomeric" not in model_configuration:
        model_configuration["isomeric"] = False
    else:
        model_configuration["isomeric"] = bool(model_configuration["isomeric"])
    if "fragments" not in model_configuration:
        model_configuration["fragments"] = "ecfp.6"
    parsed_types = []
    for item in model_configuration["fragments"].split(","):
        item_split = item.split(".")
        if item_split[0] != "ap":
            if not len(item_split) == 2:
                logging.error("Invalid fragment type: %s", item)
                logging.info("Expected format {TYPE}.{SIZE} or ap")
                exit(1)
            parsed_types.append({
                "name": item_split[0],
                "size": int(item_split[1])
            })
        else:
            parsed_types.append({
                "name": item_split[0],
            })
    model_configuration["fragments"] = parsed_types
                
    extraction_options = {
        "kekule": model_configuration["kekule"],
        "isomeric": model_configuration["isomeric"],
        "fragments": model_configuration["fragments"]
    }
    input_files = [configuration["input_actives"], configuration["input_inactives"],
                   configuration["test"]]
    directory = configuration["directory"]
    fragments_output_files = [directory+"/fragmentsa.json", directory+"/fragmentsi.json",
                              directory+"/fragmentst.json"]
    for file in fragments_output_files:
        inputoutput_utils.create_parent_directory(file)
    extract_fragments.extract_fragments(input_files, configuration["input_type"],
                                        fragments_output_files, extraction_options)

    # run extract_descriptors
    
    descriptors_output_files = [directory+"/descriptorsa.csv", directory+"/descriptorsi.csv",
                                directory+"/descriptorst.csv"]
    for file in descriptors_output_files:
        inputoutput_utils.create_parent_directory(file)
    if model_configuration["model_name"] == "descriptors_model" or \
            (model_configuration["model_name"] == "linear_regression_model"
             and int(model_configuration["molecules"]) == 0):
        compute_descriptors.compute_descriptors(fragments_output_files, descriptors_output_files,
                                                True)
    else:
        compute_descriptors.compute_descriptors(fragments_output_files, descriptors_output_files, False)

    # run create_model and score_molecules
    
    model = new_model.create_model(directory+"/fragmentsa.json", directory+"/fragmentsi.json",
                                   directory+"/descriptorsa.csv", directory+"/descriptorsi.csv",
                                   model_configuration)
    new_model.score_model(model, directory+"/fragmentst.json",
                          directory+"/descriptorst.csv", directory+"/score.json")

    # run add_activity
    activity = add_activity.read_activity(configuration["activity"])
    add_activity.add_activity_and_write_to_json(directory + "/score.json", activity,
                                                directory + "/activity.json")

    #  run compute_evaluation
    score_act = compute_evaluation.read_file_with_score_and_activity(directory + "/activity.json")
    activity = compute_evaluation.sort_activity(score_act)
    compute_evaluation.evaluation(activity, configuration["output"])
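
For orientation, the model configuration JSON that this pipeline loads needs at least a model_name; the remaining keys are optional and defaulted as handled above. The sketch below is a placeholder built from those accesses, not a configuration taken from the source.

# Placeholder model configuration (keys inferred from the code above; values are
# illustrative only). "fragments" follows the {TYPE}.{SIZE} format the parser expects.
model_configuration_example = {
    "model_name": "linear_regression_model",  # must be a name model_factory.create_model knows
    "kekule": False,                          # optional; defaults to False
    "isomeric": False,                        # optional; defaults to False
    "fragments": "ecfp.6",                    # optional; defaults to "ecfp.6"
    "molecules": 0                            # consulted for the linear_regression_model branch
}
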
Example #11
def main():
    args = parser.parse_args()

    test_time_pool = False
    if 'dpn' in args.model and args.img_size > 224 and not args.no_test_pool:
        test_time_pool = True

    # create model
    num_classes = 1000
    model = model_factory.create_model(
        args.model,
        num_classes=num_classes,
        pretrained=args.pretrained,
        test_time_pool=test_time_pool)

    print('Model %s created, param count: %d' %
          (args.model, sum([m.numel() for m in model.parameters()])))

    # optionally resume from a checkpoint
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    cudnn.benchmark = True

    transforms = model_factory.get_transforms_eval(
        args.model,
        args.img_size)

    dataset = Dataset(
        args.data,
        transforms)

    loader = data.DataLoader(
        dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(loader):
        target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+; newer PyTorch uses non_blocking
        input_var = torch.autograd.Variable(input, volatile=True).cuda()
        target_var = torch.autograd.Variable(target, volatile=True).cuda()

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                i, len(loader), batch_time=batch_time, loss=losses,
                top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(
        top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg))
def main():
    args = parser.parse_args()

    if not args.checkpoint and not args.pretrained:
        args.pretrained = True

    # create model
    model = create_model(args.model,
                         num_classes=args.num_classes,
                         in_chans=3,
                         pretrained=args.pretrained,
                         checkpoint_path=args.checkpoint)

    print('Model %s created, param count: %d' %
          (args.model, sum([m.numel() for m in model.parameters()])))

    data_config = resolve_data_config(model, args)

    criterion = nn.CrossEntropyLoss()

    if not args.no_cuda:
        if args.num_gpu > 1:
            model = torch.nn.DataParallel(model,
                                          device_ids=list(range(
                                              args.num_gpu))).cuda()
        else:
            model = model.cuda()
        criterion = criterion.cuda()

    loader = create_loader(Dataset(args.data,
                                   load_bytes=args.tf_preprocessing),
                           input_size=data_config['input_size'],
                           batch_size=args.batch_size,
                           use_prefetcher=not args.no_cuda,
                           interpolation=data_config['interpolation'],
                           mean=data_config['mean'],
                           std=data_config['std'],
                           num_workers=args.workers,
                           crop_pct=data_config['crop_pct'],
                           tensorflow_preprocessing=args.tf_preprocessing)

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            if not args.no_cuda:
                target = target.cuda()
                input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print(
                    'Test: [{0}/{1}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                    'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                        i,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                        loss=losses,
                        top1=top1,
                        top5=top5))

    print(
        ' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'
        .format(top1=top1,
                top1a=100 - top1.avg,
                top5=top5,
                top5a=100. - top5.avg))
Example #13
test_dataset = Dataset(data_json=data_path + "test_1.json",
                       with_label=False,
                       transform=transforms)

# get data loader
print('getting data loader...')

test_dataloader = data.DataLoader(test_dataset,
                                  batch_size=config["test_batch_size"],
                                  shuffle=False,
                                  num_workers=args.workers,
                                  pin_memory=True)

# define model
model = model_factory.create_model(args.model,
                                   num_classes=num_classes,
                                   pretrained=False)
model = torch.nn.DataParallel(model).cuda()

# load checkpoint
if not os.path.isfile(config['checkpoint']):
    print("=> no checkpoint found at '{}'".format(config['checkpoint']))

print("=> loading checkpoint '{}'".format(config['checkpoint']))
checkpoint = torch.load(config['checkpoint'])
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint:")

print('Epoch: [{0}][{1}]\t'
      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
Example #14
def main():
    args = parser.parse_args()

    transforms = model_factory.get_transforms_iceberg(args.model,
                                                      args.img_size)

    train_dataset = Dataset(data_json=data_path + "train_1.json",
                            with_label=True,
                            transform=None)

    val_dataset = Dataset(data_json=data_path + "val_1.json",
                          with_label=True,
                          transform=None)

    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=args.workers,
                                       pin_memory=True)

    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=args.workers,
                                     pin_memory=True)

    num_train = len(train_dataloader)

    log_file = open(
        args.output_dir + str(args.model) + "_pretrained" +
        str(args.pretrained) + "_lr" + str(args.lr_schedule) + "_bs" +
        str(args.batch_size) + "_size" + str(args.img_size) + ".log", "w")

    # configuration
    config = {
        'train_batch_size': args.batch_size,
        'val_batch_size': 10,
        'img_size': args.img_size,
        'arch': args.model,
        'pretrained': args.pretrained,
        'ckpt_title': "_lr" + str(args.lr_schedule) + "_bs" + str(args.batch_size) +
                      "_size" + str(args.img_size),
        'optimizer': 'Adam',
        'lr_schedule_idx': args.lr_schedule,
        'lr_schedule': get_lr_schedule(args.lr_schedule),
        'weight_decay': 1e-5,
        'resume': None,
        'start_epoch': 0,
        'epochs': args.num_epochs,
        'print_freq': args.print_freq,
        'validate_freq': num_train - 1,
        'save_freq': num_train - 1,
        'log_file': log_file,
        'best_val_prec1': 0
    }

    # create model
    num_classes = 2
    if args.model.endswith('sigmoid'):
        num_classes = 1

    model = model_factory.create_model(args.model,
                                       num_classes=num_classes,
                                       pretrained=False,
                                       test_time_pool=args.test_time_pool)

    # resume from a checkpoint
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)

        print('Epoch: [{0}] iter: [{1}]\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
              'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                  checkpoint['epoch'],
                  checkpoint['iter'],
                  loss=checkpoint['loss'],
                  top1=checkpoint['top1'],
                  top5=checkpoint['top5']))

        config = checkpoint['config']
        # set to resume mode
        config['resume'] = args.restore_checkpoint
        print(config)

        config['log_file'] = open(
            args.output_dir + str(config['arch']) + "_lr" +
            str(config['lr_schedule_idx']) + "_bs" +
            str(config['train_batch_size']) + "_size" +
            str(config['img_size']) + ".log", "a+")
    elif args.pretrained is True:
        print("using pretrained model")
        original_model = args.model
        if args.model.endswith('sigmoid'):
            original_model = args.model.rsplit('_', 1)[0]
        pretrained_model = model_factory.create_model(
            original_model,
            num_classes=1000,
            pretrained=args.pretrained,
            test_time_pool=args.test_time_pool)

        pretrained_state = pretrained_model.state_dict()
        model_state = model.state_dict()

        fc_layer_name = 'fc'
        if args.model.startswith('dpn') or args.model.startswith('vgg'):
            fc_layer_name = 'classifier'

        for name, state in pretrained_state.items():
            if not name.startswith(fc_layer_name):
                model_state[name].copy_(state)
    else:
        print("please use pretrained model")
        # exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()

    if args.model.endswith('sigmoid'):
        criterion = torch.nn.BCELoss().cuda()

    # get trainer
    Trainer = get_trainer(train_dataloader, val_dataloader, model, criterion,
                          config)

    # Run!
    Trainer.run()
def validate(model_name, args):

    # create model
    model = create_model(
        model_name,
        num_classes=args.num_classes,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint)

    param_count = sum([m.numel() for m in model.parameters()])
    print('Model %s created, param count: %d' % (args.model,  param_count))

    data_config = resolve_data_config(model, args)

    criterion = nn.CrossEntropyLoss()

    if not args.no_cuda:
        if args.num_gpu > 1:
            model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
        else:
            model = model.cuda()
        criterion = criterion.cuda()

    loader = create_loader(
        Dataset(args.data),
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=not args.no_cuda,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        crop_pct=data_config['crop_pct'])

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            if not args.no_cuda:
                target = target.cuda()
                input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    i, len(loader), batch_time=batch_time,
                    rate_avg=input.size(0) / batch_time.avg,
                    loss=losses, top1=top1, top5=top5))

    results = OrderedDict(model=model_name,
        top1=round(top1.avg, 3), top1_err=round(100 - top1.avg, 3),
        top5=round(top5.avg, 3), top5_err=round(100 - top5.avg, 3),
        loss=round(losses.avg, 4), param_count=round(param_count / 1e6, 2))

    print(' * Prec@1 {:.3f} ({:.3f}) Prec@5 {:.3f} ({:.3f})'.format(
       results['top1'], results['top1_err'], results['top5'], results['top5_err']))

    return results
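
Since validate() returns an OrderedDict of metrics, a caller can loop over several model names and compare them. A hedged usage sketch; the driver loop below is not part of the source:

# Hypothetical driver (not in the source): validate a few architectures and print a
# one-line summary per model using the keys validate() puts in its results dict.
for name in ['dpn92', 'dpn98']:
    res = validate(name, args)
    print('{}: top1 {} (err {}), top5 {} (err {})'.format(
        res['model'], res['top1'], res['top1_err'], res['top5'], res['top5_err']))
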
def main():
    args = parser.parse_args()

    # create model
    num_classes = 1000
    model = model_factory.create_model(
        args.model,
        num_classes=num_classes,
        pretrained=args.pretrained,
        test_time_pool=args.test_time_pool)

    # resume from a checkpoint
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    transforms = model_factory.get_transforms_eval(
        args.model,
        args.img_size)

    dataset = Dataset(
        args.data,
        transforms)

    loader = data.DataLoader(
        dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    model.eval()

    batch_time = AverageMeter()
    end = time.time()
    top5_ids = []
    for batch_idx, (input, _) in enumerate(loader):
        input = input.cuda()
        input_var = autograd.Variable(input, volatile=True)
        labels = model(input_var)
        top5 = labels.topk(5)[1]
        top5_ids.append(top5.data.cpu().numpy())

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if batch_idx % args.print_freq == 0:
            print('Predict: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                batch_idx, len(loader), batch_time=batch_time))

    top5_ids = np.concatenate(top5_ids, axis=0).squeeze()

    with open(os.path.join(args.output_dir, './top5_ids.csv'), 'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, top5_ids):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(
                filename, label[0], label[1], label[2], label[3], label[4]))
config = {
    'height': args.height,
    'width': args.width,
    'model_name': args.model_name,
    'batch_size': args.batch_size,
    'dropout_rate': args.dropout_rate,
    'learning_rate': args.learning_rate,
    'num_epochs': args.num_epochs,
    'num_particles': args.num_particles,
    'scaling_factor': args.steps_per_epoch,
    'num_splits': args.num_splits,
    'debug_mode': args.debug_mode,
    'load_data_with_numpy': args.load_data_with_numpy
}

print('enter your command!')
code.interact(local=dict(globals(), **locals()))

# possible example run 1 - might use a lot of RAM if not executed with config['debug_mode'] == True!
config['load_data_with_numpy'] = True
model = model_factory.create_model(config)
train_images, test_images, train_labels, test_labels = dataloader.load_and_preprocess_binary_data(
    config)
model_factory.train_model(model, test_images, test_labels, config,
                          train_images, train_labels)

# possible example run 2 - uses some very dirty fixes that have to be improved!
model = model_factory.create_model(config)
test_images, test_labels = dataloader.load_and_preprocess_multiclass_validation_data(
    config=config)
model_factory.train_model(model, test_images, test_labels, config)
Example #18
def main():
    args = parser.parse_args()

    test_time_pool = False
    if 'dpn' in args.model and args.img_size > 224 and not args.no_test_pool:
        test_time_pool = True

    # create model
    num_classes = 1000
    model = model_factory.create_model(args.model,
                                       num_classes=num_classes,
                                       pretrained=args.pretrained,
                                       test_time_pool=test_time_pool)

    print('Model %s created, param count: %d' %
          (args.model, sum([m.numel() for m in model.parameters()])))

    # optionally resume from a checkpoint
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    cudnn.benchmark = True

    transforms = model_factory.get_transforms_eval(args.model, args.img_size)

    dataset = Dataset(args.data, transforms)

    loader = data.DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            target = target.cuda()
            input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i,
                          len(loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1,
                          top5=top5))

    print(
        ' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'
        .format(top1=top1,
                top1a=100 - top1.avg,
                top5=top5,
                top5a=100. - top5.avg))
def main():
    args = parser.parse_args()

    # create model
    num_classes = 1000
    model = model_factory.create_model(args.model,
                                       num_classes=num_classes,
                                       pretrained=args.pretrained,
                                       test_time_pool=args.test_time_pool)

    # resume from a checkpoint
    if args.restore_checkpoint and os.path.isfile(args.restore_checkpoint):
        print("=> loading checkpoint '{}'".format(args.restore_checkpoint))
        checkpoint = torch.load(args.restore_checkpoint)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint)
        print("=> loaded checkpoint '{}'".format(args.restore_checkpoint))
    elif not args.pretrained:
        print("=> no checkpoint found at '{}'".format(args.restore_checkpoint))
        exit(1)

    if args.multi_gpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    transforms = model_factory.get_transforms_eval(args.model, args.img_size)

    dataset = Dataset(args.data, transforms)

    loader = data.DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    model.eval()

    batch_time = AverageMeter()
    end = time.time()
    top5_ids = []
    for batch_idx, (input, _) in enumerate(loader):
        input = input.cuda()
        input_var = autograd.Variable(input, volatile=True)
        labels = model(input_var)
        top5 = labels.topk(5)[1]
        top5_ids.append(top5.data.cpu().numpy())

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if batch_idx % args.print_freq == 0:
            print('Predict: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                      batch_idx, len(loader), batch_time=batch_time))

    top5_ids = np.concatenate(top5_ids, axis=0).squeeze()

    with open(os.path.join(args.output_dir, './top5_ids.csv'),
              'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, top5_ids):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(
                filename, label[0], label[1], label[2], label[3], label[4]))