Example #1
def evaluate(path, file):
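    """Reload a saved checkpoint and log macro/micro F1, accuracy and AUC on the test set."""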

    logger = utils.genlogger(os.path.join(path, 'stats.txt'))
    logger.info("Output Path: {}".format(path))
    logger.info("<---- Evaluation on Test Set ---->")

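    # map_location keeps all tensors on CPU storage so the checkpoint loads regardless of the device it was saved from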
    obj = torch.load(os.path.join(path, file),
                     map_location=lambda storage, loc: storage)
    test_label = obj['test_label']
    config = obj['config']
    Net = torch.load(config['Net'])

    model = getattr(M, config['model'])(Net, n_class=config['n_class'])
    model.load_param(obj['param'])
    model = model.to(device)

    tta_transform = utils.test_transform()
    test_dataloader = dataloader_test(
        config['data_h5'], test_label, tta_transform,
        T=config['time_step'], **config['dataloader_param']
    )

    _, f1_macro, f1_micro, acc, auc = utils.evaluate(model, test_dataloader,
                                                     device, None,
                                                     config['threshold'])

    logger.info("<---- test evaluation: ---->")
    logger.info(
        "f1_macro: {:.4f}\tf1_micro: {:.4f}\tacc: {:.4f}\tauc: {:.4f}".format(
            f1_macro, f1_micro, acc, auc))
Example #2
def run(config_file):
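    """Train the model described by config_file, logging progress and saving checkpoints to a timestamped output directory."""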
    config = get_config(config_file)

    cur_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
    outdir = os.path.join(config['outputdir'], cur_time)
    os.makedirs(outdir)

    logger = utils.genlogger(os.path.join(outdir, 'log.txt'))
    logger.info("Output Path: {}".format(outdir))
    logger.info("<---- config details ---->")
    for key in config:
        logger.info("{}: {}".format(key, config[key]))
    logger.info("<---- end of config ---->")

    train_dev = pd.read_csv(config['train_dev'], sep=',')
    n_class = config['n_class']


    #train_set, dev_set, test_set = utils.train_dev_test_split(df, outdir)
    train_set, dev_set = utils.train_dev_split(train_dev, outdir)
    test_set = pd.read_csv(config['test'], sep=',').values
    num = 5 if args.debug else None

    train_label = utils.one_hot(train_set, n_class, num)
    dev_label = utils.one_hot(dev_set, n_class, num)
    test_label = utils.one_hot(test_set, n_class, num)
    logger.info("train set: {} samples".format(len(train_label)))
    logger.info("dev set: {} samples".format(len(dev_label)))
    logger.info("test set: {} samples".format(len(test_label)))

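    # build a DenseNet-201 backbone and copy weights from the serialized model referenced by config['Net']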
    Net = torchvision.models.densenet201(pretrained=False)
    Net.load_state_dict(torch.load(config['Net']).state_dict())

    model = getattr(M, config['model'])(
        Net, n_class=n_class, **config['model_param']
    )
    logger.info("model: {}".format(str(model.other)))
    origin_model = model

    if (torch.cuda.device_count() > 1):
        model = torch.nn.DataParallel(model)

    logger.info("Use {} GPU(s)".format(torch.cuda.device_count()))

    model = model.to(device)
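    # when the backbone is trainable (Net_grad), give its parameters a separate learning rate; otherwise optimize only the non-backbone parameters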
    if config['model_param']['Net_grad']:
        optimizer = getattr(optim, config['optim'])([
            {'params': origin_model.get_Net_param(), 'lr': config['Net_lr']}, 
            {'params': origin_model.get_other_param()}
            ], lr=config['other_lr']
        )
    else:
        optimizer = getattr(optim, config['optim'])(
            origin_model.get_other_param(), 
            lr=config['other_lr']
        )

    lr_scheduler = getattr(optim.lr_scheduler, config['lr_scheduler'])(
        optimizer, **config['scheduler_param']
    )

    criterion = getattr(torch.nn, config['Loss'])()

    train_transform = utils.train_transform()
    test_transform = utils.test_transform()
    train_dataloader = dataloader_multiple(
        config['data_h5'], train_label, train_transform,
        T=config['time_step'], **config['dataloader_param']
    )
    dev_dataloader = dataloader_multiple(
        config['data_h5'], dev_label, test_transform,
        T=config['time_step'], **config['dataloader_param']
    )
    test_dataloader = dataloader_multiple(
        config['data_h5'], test_label, test_transform,
        T=config['time_step'], **config['dataloader_param']
    )

    best_dev_loss = np.inf


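    # evaluate once on the dev set before training to log a baseline and initialize best_f1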
    dev_loss = one_epoch(
        model, optimizer, criterion, dev_dataloader, False)
    f1_macro, f1_micro, acc = utils.evaluate(
        model, dev_dataloader, device, config['threshold'])
    best_f1 = f1_macro + f1_micro
    logger.info("dev_loss: {:.4f}\tf1_macro: {:.4f}\tf1_micro: {:.4f}\tacc: {:.4f}".format(
        dev_loss, f1_macro, f1_micro, acc))
 
    for epoch in range(1, config['n_epoch'] + 1):
        logger.info("<---- Epoch: {} start ---->".format(epoch))
        train_loss = one_epoch(
            model, optimizer, criterion, train_dataloader, True, config['grad_clip']
        )
        dev_loss = one_epoch(
            model, optimizer, criterion, dev_dataloader, False
        )
        logger.info("train_loss: {:.4f}\tdev_loss: {:.4f}".format(train_loss, dev_loss))

        f1_macro, f1_micro, acc = utils.evaluate(
            model, dev_dataloader, device, config['threshold'])

        logger.info("f1_macro: {:.4f}\tf1_micro: {:.4f}\tacc: {:.4f}".format(f1_macro, f1_micro, acc))


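        # checkpointing: save every 'saveinterval' epochs, plus separate copies whenever dev loss or dev F1 (macro + micro) improves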
        if epoch % config['saveinterval'] == 0:
            model_path = os.path.join(outdir, 'model_{}.th'.format(epoch))
            torch.save({
                "param": origin_model.get_param(),
                "train_label": train_label,
                "dev_label": dev_label,
                "test_label": test_label,
                "config": config
            }, model_path)
        if best_dev_loss > dev_loss:
            model_path = os.path.join(outdir, 'model.th')
            torch.save({
                "param": origin_model.get_param(),
                "train_label": train_label,
                "dev_label": dev_label,
                "test_label": test_label,
                "config": config
            }, model_path)
            best_dev_loss = dev_loss
        if best_f1 < f1_macro + f1_micro:
            model_path = os.path.join(outdir, 'model_acc.th')
            torch.save({
                "param": origin_model.get_param(),
                "train_label": train_label,
                "dev_label": dev_label,
                "test_label": test_label,
                "config": config
            }, model_path)
            best_f1 = f1_macro + f1_micro

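        # ReduceLROnPlateau expects the monitored metric (dev loss); other schedulers are stepped without an argument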
        schedarg = dev_loss if lr_scheduler.__class__.__name__ == 'ReduceLROnPlateau' else None
        lr_scheduler.step(schedarg)

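    # after the last epoch, evaluate the current model on the held-out test set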
    f1_macro, f1_micro, acc = utils.evaluate(
        model, test_dataloader, device, config['threshold'])


    logger.info("f1_macro: {:.4f}\tf1_micro: {:.4f}\tacc: {:.4f}".format(f1_macro, f1_micro, acc))
image_name = "avril.jpg"
image_path = join(content_dir, image_name)
img = cv.imread(image_path)
channels = img.shape[2]
if channels == 1:
    img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
elif channels == 4:
    img = cv.cvtColor(img, cv.COLOR_BGRA2RGB)
else:
    print("no color conversion applied ({} channels)".format(channels))
    #img = cv.cvtColor(img, cv.COLOR_BGR2RGB)



image_tf = test_transform((resolution, resolution), False)


input_tensor = image_tf(Image.fromarray(img))
input_tensor = input_tensor.cuda()
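# Variable(..., volatile=True) is the legacy pre-0.4 PyTorch way to disable autograd at inference time (replaced by torch.no_grad() in later versions)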
input_variable = Variable(input_tensor.unsqueeze(0), volatile=True)



# cv.putText(frame, "{}".format(neuron_index), (15,15), cv.FONT_HERSHEY_PLAIN, 1.,(1.,1.,1.))
cv.imshow("Input", img)
cv.waitKey(0)


reference_input = join(ref_path, "reference_encoding_{}_relu{}.npy".format(resolution, layer))
reference_input = np.load(reference_input)
obj = torch.load(args.model, map_location=lambda storage, loc: storage)

config = obj['config']
param = obj['param']
Net = torchvision.models.resnet152(pretrained=False)

n_class = config['n_class']
model = getattr(M, config['model'])(Net,
                                    n_class=n_class,
                                    **config['model_param'])
model = model.to(device)

model.load_param(param)

transform = utils.test_transform()

# train_dev = pd.read_csv(config['train_dev'], sep=',')
# train_set = train_dev.values
# test_set = pd.read_csv(config['test'], sep=',').values
# num = 5 if args.debug else None

# train_label = utils.one_hot(train_set, n_class, num)
# test_label = utils.one_hot(test_set, n_class, num)

data_h5 = config['data_h5']
dim = model.pretrain_dim
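# with autograd disabled, open the source HDF5 and an output file that will hold the extracted features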
with torch.set_grad_enabled(False):
    with h5py.File(os.path.join(args.output, 'feature_{}.hdf5'.format(dim)),
                   'w') as out:
        data = h5py.File(data_h5, 'r')
Example #5
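# load the saved VGG weights and keep only the first 31 child modules as the feature encoder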
vgg.load_state_dict(torch.load(vgg_path))
vgg = nn.Sequential(*list(vgg.children())[:31])

vgg.cuda()


for file in os.listdir(content_dir):
    t = time.time()

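    # derive the output name and skip images whose encodings already exist on disk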
    image_name = file.split(".")[0:-1][0]
    out_file = "encodings/{}_{}".format(image_name,content_size)
    if exists(out_file +".npy"):
        print("{} already calculated, skipping".format(out_file))
        continue

    image_tf = test_transform(content_size, crop)

    img = cv.imread(join(content_dir, file))
    channels = img.shape[2]
    if channels == 1:
        image = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
    elif channels == 4:
        image = cv.cvtColor(img, cv.COLOR_BGRA2RGB)
    else:
        image = cv.cvtColor(img, cv.COLOR_BGR2RGB)

    image = image_tf(Image.fromarray(image))

    image = image.cuda()
    image = Variable(image.unsqueeze(0), volatile=True)
    image_f = vgg(image)