Code Example #1
def test(threshold, model_name='model'):
    print("Loading data...")
    test_data = utils.load_test_data(test_file, me, ms, mr)
    test_example_num = len(test_data["input_ids"])
    print("Done.")

    with tf.Session() as sess:
        model = Model(
            max_entity_num=me,
            max_sentence_num=ms,
            max_relation_num=mr,
            max_seq_length=FLAGS.max_seq_length,
            class_num=len(rel2id),
            entity_types=len(ner2id),
            bert_config=bert_config,
            hidden_size=FLAGS.hidden_size,
            hidden_layers=FLAGS.hidden_layers,
            attention_heads=FLAGS.attention_heads,
            intermediate_size=FLAGS.intermediate_size,
            hidden_dropout_prob=bert_config.hidden_dropout_prob,
            attention_probs_dropout_prob=bert_config.attention_probs_dropout_prob,
            graph_hidden_dropout_prob=FLAGS.graph_hidden_dropout_prob,
            graph_attention_probs_dropout_prob=FLAGS.graph_attention_probs_dropout_prob,
        )

        saver = tf.train.Saver()
        checkpoint = os.path.join(checkpoint_dir, model_name)
        saver.restore(sess, checkpoint)

        test_logits = []
        test_index = []
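        # iterate over the test set in mini-batches and collect the sigmoid outputs per example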
        for batch_index in tqdm(
                utils.batch_iter(test_example_num, FLAGS.batch_size, False)):
            feed_dict = {
                model.input_ids: test_data["input_ids"][batch_index],
                model.input_mask: test_data["input_mask"][batch_index],
                model.segment_ids: test_data["segment_ids"][batch_index],
                model.entity_mask: test_data["entity_mask"][batch_index],
                model.entity_types: test_data["entity_types"][batch_index],
                model.sentence_mask: test_data["sentence_mask"][batch_index],
                model.attention_mask: test_data["attention_mask"][batch_index],
                model.relation_mask: test_data["relation_mask"][batch_index],
                model.head_mask: test_data["head_mask"][batch_index],
                model.tail_mask: test_data["tail_mask"][batch_index],
                model.is_training: False
            }
            logit = sess.run(model.sigmoid, feed_dict)
            test_logits.append(logit)
            test_index += batch_index
        test_logits = np.concatenate(test_logits, axis=0)

    utils.inference(test_logits, test_data, test_index, threshold)
Code Example #2
def get_actions(ball_location, last_ball_location, left_location, left_model, right_location, right_model):
    left_action = get_random_action(ALL_ACTIONS)
    right_action = get_random_action(ALL_ACTIONS)
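    # default to random actions; these are replaced by model inference when the ball and paddle positions are known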
    if last_ball_location is None:
        last_ball_location = ball_location
    if ball_location is not None:
        if left_location is not None:
            # here we are flipping X
            left_ball_loc = [ball_location[0], GAME_WIDTH - ball_location[1]]
            left_last_ball_loc = [last_ball_location[0], GAME_WIDTH - last_ball_location[1]]
            left_action = inference(left_ball_loc, left_last_ball_loc, left_location, right_location, left_model)
        if right_location is not None:
            right_action = inference(ball_location, last_ball_location, right_location, left_location, right_model)
    else:
        left_action = [0, 0]
        right_action = [0, 0]
    return left_action, right_action
Code Example #3
def car_recognize():
    try:
        data = json.loads(request.data)
        preds = utils.inference(model, data['content'])
        response = {'probabilities': preds}
        return jsonify(response)

    except BaseException as err:
        print(err)
        raise err
Code Example #4
    def single_demo(self):
        self.net.eval()

        current_dir = os.path.dirname(os.path.realpath(__file__))

        image_list = os.listdir(self.args.image_path)
        image_list.sort()
        start_time = time.time()
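        # run inference over every image in image_path and save the predictions to output_bin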
        inference(self.input_transform,
                  self.net,
                  image_list,
                  self.args.image_path,
                  save_dir=self.args.output_bin)
        duration = time.time() - start_time
        print('{}s used to make predictions.\n'.format(duration))

        conf_m, IOU = calculate_iou(self.args.nb_classes, self.args.output_bin,
                                    self.args.label_path, self.args.image_path,
                                    image_list)
        print('IOU: ')
        print(IOU)
        print('meanIOU: %f' % np.mean(IOU))
        print('pixel acc: %f' % (np.sum(np.diag(conf_m)) / np.sum(conf_m)))
Code Example #5
File: reconstruct.py Project: sundonkey/BionoiNet
def reconstruction_creation(index, dataset):
    '''create reconstructed image and calculate loss between original and reconstructed image'''

    image, name = dataset[index]
    reconstruct_image = inference(device, image.unsqueeze(0), model)  # create reconstructed image using model
    recon_detach = reconstruct_image.detach()
    recon_cpu = recon_detach.cpu() # send to cpu
    recon_numpy = recon_cpu.numpy()  # convert image to numpy array for easier calculations
    recon_numpy = np.squeeze(recon_numpy, axis=0)

    criterion = nn.MSELoss()
    loss = str(criterion(image.unsqueeze(0).cpu(), reconstruct_image.cpu()).item()) # calculate loss

    return recon_numpy, loss
Code Example #6
File: mil.py Project: Tahy1/MIL_spore
def main():
    fmoment = int(time.time())
    args = parse_args()
    norm = args.norm
    backbone = args.backbone
    pretrained = args.pretrained
    lossfunc = args.loss
    size = args.size
    pk = args.pk
    nk = args.nk
    n_epoch = args.n_epoch
    gpu = args.gpu
    test_every = args.test_every
    ckpt = args.ckpt
    print(
        'norm=%s backbone=%s pretrained=%s lossfunc=%s size=%s pk=%d nk=%d epoch=%d gpu=%d test_every=%d ckpt=%s'
        % (norm, backbone, pretrained, lossfunc, size, pk, nk, n_epoch, gpu,
           test_every, ckpt))
    if backbone == 'resnet18':
        model = resnet18.resnet18(norm=norm).cuda(device=gpu)
    if pretrained == 'pretrained':
        ckpt_dict = torch.load('resnet18-pretrained.pth')
        model_dict = model.state_dict()
        ckpt_dict = {k: v for k, v in ckpt_dict.items() if k in model_dict}
        model_dict.update(ckpt_dict)
        model.load_state_dict(model_dict)
    if lossfunc == 'CE':
        criterion = nn.CrossEntropyLoss().cuda(device=gpu)
    elif lossfunc == 'Focal':
        criterion = FocalLoss(class_num=2, gpu=gpu).cuda(device=gpu)
        for m in model.modules():
            if isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, -math.log(99))
    elif lossfunc == 'BCE':
        criterion = BCE(class_num=2, gpu=gpu).cuda(device=gpu)
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
    cudnn.benchmark = True
    train_trans = transforms.Compose([
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.2005, 0.1490, 0.1486],
                             std=[0.1445, 0.1511, 0.0967])
    ])
    infer_trans = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.2005, 0.1490, 0.1486],
                             std=[0.1445, 0.1511, 0.0967])
    ])
    train_dset = XDataset('train-%s.lib' % size,
                          train_trans=train_trans,
                          infer_trans=infer_trans)
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=64,
                                               shuffle=False,
                                               pin_memory=True)
    test_dset = XDataset('test-%s.lib' % size,
                         train_trans=train_trans,
                         infer_trans=infer_trans)
    test_loader = torch.utils.data.DataLoader(test_dset,
                                              batch_size=128,
                                              shuffle=False,
                                              pin_memory=True)

    if ckpt != 'none':
        checkpoint = torch.load(ckpt)
        start = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_f1 = checkpoint['best_f1']
        optimizer.load_state_dict(checkpoint['optimizer'])
        if not os.path.exists(
                'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment)):
            fconv = open(
                'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment),
                'w')
            fconv.write('time,epoch,loss,error\n')
            fconv.write('%d,0,0,0\n' % fmoment)
            fconv.close()
        if not os.path.exists(
                'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment)):
            fconv = open(
                'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment),
                'w')
            fconv.write('time,epoch,loss,error,tp,tn,fp,fn,f1,S\n')
            fconv.write('%d,0,0,0\n' % fmoment)
            fconv.close()
    else:
        start = 0
        best_f1 = 0
        fconv = open(
            'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment), 'w')
        fconv.write('time,epoch,loss,error\n')
        fconv.write('%d,0,0,0\n' % fmoment)
        fconv.close()

        fconv = open(
            'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment), 'w')
        fconv.write('time,epoch,loss,error,tp,tn,fp,fn,f1,S\n')
        fconv.write('%d,0,0,0\n' % fmoment)
        fconv.close()

    for epoch in range(start, n_epoch):
        train_dset.setmode(1)
        _, probs = inference(epoch, train_loader, model, criterion, gpu)
        #        torch.save(probs,'probs/train-%d.pth'%(epoch+1))
        probs1 = probs[:train_dset.plen]
        probs0 = probs[train_dset.plen:]
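        # select the top-k scoring tiles per slide (pk for positive slides, nk for negative slides) to build the next training set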

        topk1 = np.array(
            group_argtopk(np.array(train_dset.slideIDX[:train_dset.plen]),
                          probs1, pk))
        topk0 = np.array(
            group_argtopk(np.array(train_dset.slideIDX[train_dset.plen:]),
                          probs0, nk)) + train_dset.plen
        topk = np.append(topk1, topk0).tolist()
        #        torch.save(topk,'topk/train-%d.pth'%(epoch+1))
        #        maxs = group_max(np.array(train_dset.slideIDX), probs, len(train_dset.targets))
        #        torch.save(maxs, 'maxs/%d.pth'%(epoch+1))
        sf(topk)
        train_dset.maketraindata(topk)
        train_dset.setmode(2)
        loss, err = train(train_loader, model, criterion, optimizer, gpu)
        moment = time.time()
        writecsv([moment, epoch + 1, loss, err],
                 'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                 (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment))
        print('Training epoch=%d, loss=%.5f, error=%.5f' %
              (epoch + 1, loss, err))
        if (epoch + 1) % test_every == 0:
            test_dset.setmode(1)
            loss, probs = inference(epoch, test_loader, model, criterion, gpu)
            #            torch.save(probs,'probs/test-%d.pth'%(epoch+1))
            #            topk = group_argtopk(np.array(test_dset.slideIDX), probs, pk)
            #            torch.save(topk, 'topk/test-%d.pth'%(epoch+1))
            maxs = group_max(
                np.array(test_dset.slideIDX), probs,
                len(test_dset.targets))  # return the maximum probability of each slide
            #            torch.save(maxs, 'maxs/test-%d.pth'%(epoch+1))
            pred = [1 if x >= 0.5 else 0 for x in maxs]
            tp, tn, fp, fn = tfpn(pred, test_dset.targets)
            err = calc_err(pred, test_dset.targets)
            S, f1 = score(tp, tn, fp, fn)
            moment = time.time()
            writecsv(
                [moment, epoch + 1, loss, err, tp, tn, fp, fn, f1, S],
                'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment))
            print('Testing epoch=%d, loss=%.5f, error=%.5f' %
                  (epoch + 1, loss, err))
            #Save best model
            if f1 >= best_f1:
                best_f1 = f1
                obj = {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_f1': best_f1,
                    'optimizer': optimizer.state_dict()
                }
                torch.save(
                    obj, 'ckpt_%s_%s_%s_%s_%s_%d_%d_%d.pth' %
                    (norm, backbone, pretrained, lossfunc, size, pk, nk,
                     fmoment))
Code Example #7
def train_srgans(train_loader, val_loader, generator, discriminator, device,
                 args):

    # Loss Function #
    criterion_Perceptual = PerceptualLoss(args.model).to(device)

    # For SRGAN #
    criterion_MSE = nn.MSELoss()
    criterion_TV = TVLoss()

    # For ESRGAN #
    criterion_BCE = nn.BCEWithLogitsLoss()
    criterion_Content = nn.L1Loss()

    # Optimizers #
    D_optim = torch.optim.Adam(discriminator.parameters(),
                               lr=args.lr,
                               betas=(0.9, 0.999))
    G_optim = torch.optim.Adam(generator.parameters(),
                               lr=args.lr,
                               betas=(0.9, 0.999))

    D_optim_scheduler = get_lr_scheduler(D_optim, args)
    G_optim_scheduler = get_lr_scheduler(G_optim, args)

    # Lists #
    D_losses, G_losses = list(), list()

    # Train #
    print("Training {} started with total epoch of {}.".format(
        str(args.model).upper(), args.num_epochs))

    for epoch in range(args.num_epochs):
        for i, (high, low) in enumerate(train_loader):

            discriminator.train()
            if args.model == "srgan":
                generator.train()

            # Data Preparation #
            high = high.to(device)
            low = low.to(device)

            # Initialize Optimizers #
            D_optim.zero_grad()
            G_optim.zero_grad()

            #######################
            # Train Discriminator #
            #######################

            set_requires_grad(discriminator, requires_grad=True)

            # Generate Fake HR Images #
            fake_high = generator(low)

            if args.model == 'srgan':

                # Forward Data #
                prob_real = discriminator(high)
                prob_fake = discriminator(fake_high.detach())

                # Calculate Total Discriminator Loss #
                D_loss = 1 - prob_real.mean() + prob_fake.mean()

            elif args.model == 'esrgan':

                # Forward Data #
                prob_real = discriminator(high)
                prob_fake = discriminator(fake_high.detach())

                # Relativistic Discriminator #
                diff_r2f = prob_real - prob_fake.mean()
                diff_f2r = prob_fake - prob_real.mean()

                # Labels #
                real_labels = torch.ones(diff_r2f.size()).to(device)
                fake_labels = torch.zeros(diff_f2r.size()).to(device)

                # Adversarial Loss #
                D_loss_real = criterion_BCE(diff_r2f, real_labels)
                D_loss_fake = criterion_BCE(diff_f2r, fake_labels)

                # Calculate Total Discriminator Loss #
                D_loss = (D_loss_real + D_loss_fake).mean()

            # Back Propagation and Update #
            D_loss.backward()
            D_optim.step()

            ###################
            # Train Generator #
            ###################

            set_requires_grad(discriminator, requires_grad=False)

            if args.model == 'srgan':

                # Adversarial Loss #
                prob_fake = discriminator(fake_high).mean()
                G_loss_adversarial = torch.mean(1 - prob_fake)
                G_loss_mse = criterion_MSE(fake_high, high)

                # Perceptual Loss #
                lambda_perceptual = 6e-3
                G_loss_perceptual = criterion_Perceptual(fake_high, high)

                # Total Variation Loss #
                G_loss_tv = criterion_TV(fake_high)

                # Calculate Total Generator Loss #
                G_loss = args.lambda_adversarial * G_loss_adversarial + G_loss_mse + lambda_perceptual * G_loss_perceptual + args.lambda_tv * G_loss_tv

            elif args.model == 'esrgan':

                # Forward Data #
                prob_real = discriminator(high)
                prob_fake = discriminator(fake_high)

                # Relativistic Discriminator #
                diff_r2f = prob_real - prob_fake.mean()
                diff_f2r = prob_fake - prob_real.mean()

                # Labels #
                real_labels = torch.ones(diff_r2f.size()).to(device)
                fake_labels = torch.zeros(diff_f2r.size()).to(device)

                # Adversarial Loss #
                G_loss_bce_real = criterion_BCE(diff_f2r, real_labels)
                G_loss_bce_fake = criterion_BCE(diff_r2f, fake_labels)

                G_loss_bce = (G_loss_bce_real + G_loss_bce_fake).mean()

                # Perceptual Loss #
                lambda_perceptual = 1e-2
                G_loss_perceptual = criterion_Perceptual(fake_high, high)

                # Content Loss #
                G_loss_content = criterion_Content(fake_high, high)

                # Calculate Total Generator Loss #
                G_loss = args.lambda_bce * G_loss_bce + lambda_perceptual * G_loss_perceptual + args.lambda_content * G_loss_content

            # Back Propagation and Update #
            G_loss.backward()
            G_optim.step()

            # Add items to Lists #
            D_losses.append(D_loss.item())
            G_losses.append(G_loss.item())

            ####################
            # Print Statistics #
            ####################

            if (i + 1) % args.print_every == 0:
                print(
                    "{} | Epoch [{}/{}] | Iterations [{}/{}] | D Loss {:.4f} | G Loss {:.4f}"
                    .format(
                        str(args.model).upper(), epoch + 1, args.num_epochs,
                        i + 1, len(train_loader), np.average(D_losses),
                        np.average(G_losses)))

                # Save Sample Images #
                sample_images(val_loader, args.batch_size, args.scale_factor,
                              generator, epoch, args.samples_path, device)

        # Adjust Learning Rate #
        D_optim_scheduler.step()
        G_optim_scheduler.step()

        # Save Model Weights and Inference #
        if (epoch + 1) % args.save_every == 0:
            torch.save(
                generator.state_dict(),
                os.path.join(
                    args.weights_path,
                    '{}_Epoch_{}.pkl'.format(generator.__class__.__name__,
                                             epoch + 1)))
            inference(val_loader, generator, args.upscale_factor, epoch,
                      args.inference_path, device)
Code Example #8
print('Finished Training')
torch.save(net.state_dict(), 'model/poisoned_nontarget_pytorch.pth')


# %%
# train clean
x_train_clean, y_train_clean = x_train.copy(), y_train.copy()
y_train_clean = np.argmax(y_train_clean, 1)
trainset_clean = torch.utils.data.TensorDataset(
    torch.tensor(x_train_clean),
    torch.tensor(y_train_clean))
trainloader_clean = torch.utils.data.DataLoader(trainset_clean, batch_size=8,
                                                shuffle=False, num_workers=2)
print('\ntrainloader_clean')
preds_train_clean = inference(trainloader_clean, net, device)
MyConfMat(y_train_clean.tolist(), preds_train_clean)

# train poison
trainset_poison = torch.utils.data.TensorDataset(
    torch.tensor(x_train_poison),
    torch.tensor(y_train_clean))
trainloader_poison = torch.utils.data.DataLoader(trainset_poison, batch_size=8,
                                                 shuffle=False, num_workers=2)
print('\ntrainloader_poison')
preds_train_poison = inference(trainloader_poison, net, device)
MyConfMat(y_train_clean.tolist(), preds_train_poison)

# train all poison
x_train_allpoison, y_train_allpoison = x_train.copy(), y_train.copy()
x_train_allpoison, y_train_allpoison = poison(x_train_allpoison,
Code Example #9
File: inference.py Project: greatwallet/mountain-car
# pickle_path
pickle_path = osp.join('pickles', 'latest.pickle')

# discretized state value
min_state_val = 0
max_state_val = 40

# random seed
seed = 42

# learning mode "Q-learning", "SARSA" or "Expected-SARSA"
learning_mode = "Q-learning"

# save path
save_path = None  # 'results'
if save_path is not None and not osp.exists(save_path):
    os.makedirs(save_path)

if __name__ == "__main__":
    score = inference(pickle_path=pickle_path,
                      env_name=env_name,
                      epsilon=epsilon,
                      min_state_val=min_state_val,
                      max_state_val=max_state_val,
                      seed=seed,
                      save_path=save_path,
                      learning_mode=learning_mode)
    print("Score: {} (Model: {}; Env: {}) ".format(score, pickle_path,
                                                   env_name))
Code Example #10
def main():
    args = Args()

    logger = get_logger('main')
    EXP_NAME = 'multihead 4 layer with mask'
    assert EXP_NAME is not None, 'What kind of experiment is this!!'
    print(EXP_NAME)

    kaggle = KaggleData(args.train_path, args.test_path)
    kaggle.build_field(args.max_len, include_lengths=args.lengths)
    kaggle.build_dataset(split_ratio=0.9,
                         stratified=False,
                         strata_field='target')
    kaggle.build_vocab('question',
                       args.max_vocab,
                       min_freq=args.min_freq,
                       pretrained_vectors=args.embedding,
                       cache=args.cache)
    kaggle.build_iterator(batch_sizes=[args.batch_size] * 3,
                          device=args.device)
    kaggle.summary()

    logger.info('building model...')
    model = build_model(kaggle, args)

    # TODO: hyperparameter pos_weight is to be tuned
    criterion = nn.BCEWithLogitsLoss(
        reduction='sum',
        pos_weight=torch.tensor([args.pos_weight], device=args.device))
    optimizer, scheduler = build_optimizer_scheduler(
        'Adam',
        lr=0.001,
        parameters=model.parameters(),
        factor=0.5,
        patience=args.scheduler_patience,
        verbose=True)
    logger.info('start training...')
    early_stopping = EarlyStoppingCriterion(patience=args.early_stop_patience)
    for epoch in range(args.epoch):
        loss = run_epoch(model, kaggle.train_iter, criterion, optimizer)
        f1_score, accuracy = evaluate(model,
                                      kaggle.valid_iter,
                                      threshold=args.threshold,
                                      vocab=kaggle.vocab,
                                      verbose=False)
        scheduler.step(f1_score)
        print('loss at epoch {}: {:.5}'.format(epoch + 1, loss))
        print('f1 score / accuracy on valid: {:.4} / {:.4}'.format(
            f1_score, accuracy))

        if early_stopping(epoch, f1_score):
            if early_stopping.is_improved:
                logger.info('best model achieved in this epoch')
                # TODO: path name!!
                torch.save(model.state_dict(), 'best_model.pt')
        else:
            logger.info('early stopping...')
            break
        print()

    logger.info('best model is from epoch {} (f1: {:.4})'.format(
        early_stopping.best_epoch, early_stopping.best_score))
    model.load_state_dict(torch.load('best_model.pt'))

    logger.info('selecting threshold...')
    best = 0
    best_threshold = 0
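    # sweep candidate thresholds on the validation set and keep the one with the best F1 score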
    for th in np.arange(0.2, 0.6, 0.05):
        # FIXME: verbose
        f1_score, accuracy = evaluate(model,
                                      kaggle.valid_iter,
                                      threshold=float(th),
                                      vocab=kaggle.vocab,
                                      verbose=False)
        if f1_score > best:
            best = f1_score
            best_threshold = th
    print('best f1_score with threshold {}: {:.4} '.format(
        best_threshold, float(best)))

    pred_total, qid_total = inference(model, kaggle.test_iterator,
                                      best_threshold)
    write_to_csv(pred_total, qid_total, path='submission.csv')
Code Example #11
    model.train()
    img1, anno1, img2, anno2 = img1.to('cuda'), anno1.to('cuda'), img2.to(
        'cuda'), anno2.to('cuda')
    loss = model.calc_loss(img1, anno1, img2, anno2, K,
                           utils.get_max_label(video))
    optim.zero_grad()
    loss.backward()
    optim.step()
    print(
        '{}/100000. {:.02f} seconds passed. current loss {:.05f}, lr {}, video {}'
        .format(ix + 1,
                time.time() - st, loss.data, lr, video),
        flush=True)
    if ix % ckpt_iter == 0:
        #checkpoint
        utils.save_to_checkpoint(model, ix, DEBUG)

    if (ix + 1) % lr_update_iter == 0:
        #adjust learning rate
        lr_before = lr
        lr = lr * lr_factor
        print('adjust learning rate .. from {} to {}'.format(lr_before, lr))
        optim = factory.make_optim(model, lr, momentum, weight_decay)

    if (DEBUG and ix % 50 == 0) or ix % 500 == 0 or (ix < 500
                                                     and ix % 100 == 0):
        model.eval()
        print('inference started.')
        utils.inference(model, K, ix, DEBUG)
        print('inference finished.')
Code Example #12
    # ---------------------------------------------------------------------
    # Inference
    # ---------------------------------------------------------------------

    if args.inference:

        num_correct = [0]

        def collector(outputs, targets):
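            # accumulate the number of correct top-1 predictions across batches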
            pred = outputs.cpu().max(1)[1]
            num_correct[0] += pred.eq(targets.cpu().view_as(pred)).sum().item()

        inference(model,
                  test_loader,
                  criterion,
                  collector,
                  args)

        num_correct = num_correct[0]
        test_accuracy = 100. * (num_correct / len(test_loader.dataset))

        print('Test set: Accuracy: {}/{} ({:.0f}%)\n'.format(
            num_correct,
            len(test_loader.dataset),
            test_accuracy
        ))

    # ---------------------------------------------------------------------
    # Model saving
    # ---------------------------------------------------------------------
Code Example #13
# -------------------------------------------------------------------------
# Inference
# -------------------------------------------------------------------------

if args.inference:

    loss = F.binary_cross_entropy_with_logits
    preds = []

    def collector(outputs, targets):
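        # keep the raw model outputs so they can be checked during validation below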
        preds.append(outputs.detach().cpu().numpy())

    inference(model,
              seq_dataloader,
              loss,
              collector,
              args)

    # ---------------------------------------------------------------------
    # Validation
    # ---------------------------------------------------------------------

    if args.validate:
        preds = np.vstack(preds)

        assert isinstance(preds, np.ndarray)
        assert preds.shape[0] == len(X_train)
        assert preds.shape[1] == n_toks

        X_valid = np.load('%s_valid.npy' % args.cache_path, allow_pickle=True)
Code Example #14
File: lenet5.py Project: cornell-brg/hb-pytorch-apps
    print(model)

    # Quit here if dry run
    if args.dry:
        exit(0)

    # Training
    if args.training:
        utils.train(model, trainloader, optimizer, loss_func, args)

    # Inference
    if args.inference:

        num_correct = [0]

        def collector(outputs, targets):
            pred = outputs.cpu().max(1)[1]
            num_correct[0] += pred.eq(targets.cpu().view_as(pred)).sum().item()

        utils.inference(model, testloader, loss_func, collector, args)

        num_correct = num_correct[0]
        test_accuracy = 100. * (num_correct / len(testloader.dataset))

        print('Test set: Accuracy: {}/{} ({:.0f}%)\n'.format(
            num_correct, len(testloader.dataset), test_accuracy))

    # Save model
    if args.save_model:
        utils.save_model(model, args.model_filename)
Code Example #15
    print('flattened input size:', input_size)

    # instantiate and load model
    if style == 'conv':
        model = ConvAutoencoder()
    elif style == 'dense':
        model = DenseAutoencoder(input_size, feature_size)

    # if there are multiple GPUs, split the batch to different GPUs
    if torch.cuda.device_count() > 1:
        print("Using " + str(torch.cuda.device_count()) + " GPUs...")
        model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_file))

    # get the reconstructed image
    reconstruct_image = inference(device, image.unsqueeze(0), model)
    print('shape of reconstructed image:', reconstruct_image.shape)
    #print(reconstruct_image)

    # measure the loss between the 2 images
    criterion = nn.MSELoss()
    loss = criterion(image.unsqueeze(0).cpu(), reconstruct_image.cpu())
    print('loss between before and after:', loss)

    # plot images before and after reconstruction
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(14, 7))
    ax1.imshow(np.transpose(image.numpy(), (1, 2, 0)))
    ax1.set_title('Original Image')
    ax2.imshow(
        np.transpose(reconstruct_image.squeeze().detach().cpu().numpy(),
                     (1, 2, 0)))
Code Example #16
File: demo.py Project: thainguyentrong/triplet-net
                             image_dir=hp.image_dir,
                             label_dir=hp.label_dir,
                             subset='valid')

    for q_name, attribute in q_valid.get_queries().items():
        start = time.time()
        bbox, class_idx = attribute[0], attribute[1]
        # create image tensor
        query_img = image_preprocessing(hp.image_dir + q_name)
        query_tensor = torch.FloatTensor(
            np.transpose(np.expand_dims(query_img, axis=0), axes=[0, 3, 1, 2]))
        # get embedding vector
        if use_gpu:
            query_tensor = query_tensor.cuda()
            cs_func = cs_func.cuda()
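        # embed the query image and rank the database embeddings by similarity score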
        query_embedding = inference(model, query_tensor)
        similarity = cs_func(query_embedding, db_embeddings).topk(
            len(q_valid.get_groundtruth()[class_idx]))
        prediction = [maps[idx] for idx in similarity[1].cpu().numpy()]
        end = time.time()
        score = similarity[0].cpu().numpy()
        AP = calculate_AP(prediction=prediction,
                          score=score,
                          groundtruth=q_valid.get_groundtruth()[class_idx])
        mAP.append(AP)
        time_running.append(end - start)

        # visualization
        # visualize(q_name, prediction, score)
        # break
Code Example #17
def infer(data_path, **kwargs):
    return inference(model, data_path)
Code Example #18
def train_srcnns(train_loader, val_loader, model, device, args):

    # Loss Function #
    criterion = nn.L1Loss()

    # Optimizers #
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 betas=(0.5, 0.999))
    optimizer_scheduler = get_lr_scheduler(optimizer=optimizer, args=args)

    # Lists #
    losses = list()

    # Train #
    print("Training {} started with total epoch of {}.".format(
        str(args.model).upper(), args.num_epochs))

    for epoch in range(args.num_epochs):
        for i, (high, low) in enumerate(train_loader):

            # Data Preparation #
            high = high.to(device)
            low = low.to(device)

            # Forward Data #
            generated = model(low)

            # Calculate Loss #
            loss = criterion(generated, high)

            # Initialize Optimizer #
            optimizer.zero_grad()

            # Back Propagation and Update #
            loss.backward()
            optimizer.step()

            # Add items to Lists #
            losses.append(loss.item())

            # Print Statistics #
            if (i + 1) % args.print_every == 0:
                print("{} | Epoch [{}/{}] | Iterations [{}/{}] | Loss {:.4f}".
                      format(
                          str(args.model).upper(), epoch + 1, args.num_epochs,
                          i + 1, len(train_loader), np.average(losses)))

                # Save Sample Images #
                sample_images(val_loader, args.batch_size, args.upscale_factor,
                              model, epoch, args.samples_path, device)

        # Adjust Learning Rate #
        optimizer_scheduler.step()

        # Save Model Weights and Inference #
        if (epoch + 1) % args.save_every == 0:
            torch.save(
                model.state_dict(),
                os.path.join(
                    args.weights_path,
                    '{}_Epoch_{}.pkl'.format(model.__class__.__name__,
                                             epoch + 1)))
            inference(val_loader, model, args.upscale_factor, epoch,
                      args.inference_path, device)
Code Example #19
def main():
    # Configurations
    lr = 0.00000001  # learning rate
    batch_size = 64  # batch_size
    last_epoch = 1  # the last training epoch. (default: 1)
    max_epoch = 553  # maximum epoch for the training.

    num_boxes = 2  # the number of boxes for each grid in Yolo v1.
    num_classes = 20  # the number of classes in Pascal VOC Detection.
    grid_size = 7  # 3x224x224 image is reduced to (5*num_boxes+num_classes)x7x7.
    lambda_coord = 7  # weight for coordinate regression loss.
    lambda_noobj = 0.5  # weight for no-objectness confidence loss.
    """ dataset load """
    train_dset = VOCDetection(root=data_root, split='train')
    train_dloader = DataLoader(train_dset,
                               batch_size=batch_size,
                               shuffle=True,
                               drop_last=True,
                               num_workers=8)
    # drop_last: discard the leftover samples that do not fill a full batch
    test_dset = VOCDetection(root=data_root, split='test')
    test_dloader = DataLoader(test_dset,
                              batch_size=batch_size,
                              shuffle=False,
                              drop_last=False,
                              num_workers=8)
    """ model load """
    model = Yolo(grid_size, num_boxes, num_classes)
    #model = nn.DataParallel(model, device_ids = [5,6,7])
    model = model.to(DEVICE)

    #pretrained_weights = torch.load(pretrained_backbone_path)
    #model.load_state_dict(pretrained_weights)
    """ optimizer / loss """
    model.features.requires_grad_(False)
    model_params = [v for v in model.parameters() if v.requires_grad is True]
    optimizer = optim.Adam(model_params, lr=lr, betas=[0.9, 0.999])
    # Load the last checkpoint if exits.
    ckpt_path = os.path.join(ckpt_dir, 'last_best.pth')
    if os.path.exists(ckpt_path):
        ckpt = torch.load(ckpt_path, map_location='cuda:3')
        model.load_state_dict(ckpt['model'])
        optimizer.load_state_dict(ckpt['optimizer'])
        last_epoch = ckpt['epoch'] + 1
        print('Last checkpoint is loaded. start_epoch:', last_epoch)
    else:
        print('No checkpoint is found.')

    Yolov1Loss = Loss(7, 2, 20)
    #ckpt_path = os.path.join(ckpt_dir, 'last_best.pth')
    """ training """
    # Training & Testing.
    model = model.to(DEVICE)
    best_loss = 1
    for epoch in range(1, max_epoch):
        step = 0
        # Learning rate scheduling
        if epoch in [50, 150, 550, 600]:
            lr *= 0.1
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        if epoch < last_epoch:
            continue

        model.train()
        for x, y in train_dloader:
            step += 1
            imgs = Variable(x)
            gt_outs = Variable(y)
            imgs, gt_outs = imgs.to(DEVICE), gt_outs.to(DEVICE)
            model_outs = model(imgs)
            loss = Yolov1Loss(model_outs, gt_outs)

            if loss < best_loss:
                best_loss = loss
                ckpt = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'epoch': epoch
                }
                torch.save(ckpt, ckpt_path)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print('step:{}/{} | loss:{:.8f}'.format(step, len(train_dloader),
                                                    loss.item()))

        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for x, y in test_dloader:
                imgs = Variable(x)
                gt_outs = Variable(y)
                imgs, gt_outs = imgs.to(DEVICE), gt_outs.to(DEVICE)

                model_outs = model(imgs)
                loss = Yolov1Loss(model_outs, gt_outs)
                loss_iter = loss.item()
            print('Epoch [%d/%d], Val Loss: %.4f' %
                  (epoch, max_epoch, loss_iter))

        ckpt = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch
        }
        torch.save(ckpt, ckpt_path)
    ''' test '''

    test_image_dir = os.path.join(root, 'test_images')
    image_path_list = [
        os.path.join(test_image_dir, path)
        for path in os.listdir(test_image_dir)
    ]

    for image_path in image_path_list:
        inference(model, image_path)
Code Example #20
File: eval.py Project: CSberlin/SiamDLT
import dataset
import factory
import utils
import os

DEBUG = True
gpu = 1
K = 100
print('running on gpu {}'.format(gpu))
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

ckpt_file = 'PATH TO YOUR CHECKPOINT FILE'

model = factory.make_siamese_model(ckpt_file).cuda()
model.eval()

dataset = dataset.DAVIS2017(DEBUG)
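# run inference on DAVIS2017 with the restored Siamese model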
utils.inference(model, K, 'unknown', DEBUG)

Code Example #21
def main(args):

    # Device Configuration for Multi-GPU Environment #
    device = torch.device(
        f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')

    # Fix Seed for Reproducibility #
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Samples and Weights Path #
    paths = [args.samples_path, args.weights_path]
    for path in paths:
        make_dirs(path)

    # Prepare Data Loader #
    train_div2k_loader = get_div2k_loader(sort='train',
                                          batch_size=args.batch_size,
                                          image_size=args.image_size,
                                          upscale_factor=args.upscale_factor,
                                          crop_size=args.crop_size,
                                          patch_size=args.patch_size,
                                          patch=args.patch,
                                          flip=args.flip,
                                          rotate=args.rotate)

    val_div2k_loader = get_div2k_loader(sort='val',
                                        batch_size=args.val_batch_size,
                                        image_size=args.image_size,
                                        upscale_factor=args.upscale_factor,
                                        crop_size=args.crop_size)
    print("val_div2k ", val_div2k_loader)

    # Prepare Networks #
    if args.model == 'edsr':

        edsr = EDSR(channels=args.channels,
                    features=args.dim,
                    num_residuals=args.num_residuals,
                    scale_factor=args.upscale_factor).to(device)

    else:
        raise NotImplementedError

    D = Discriminator(in_channels=args.channels,
                      ndf=args.dim,
                      linear_dim=args.linear_dim,
                      out_dim=args.out_dim,
                      disc_type=args.disc_type).to(device)

    if args.phase == 'train':
        if args.model == 'edsr':
            train_srcnns(train_div2k_loader, val_div2k_loader, edsr, device,
                         args)

    elif args.phase == 'inference':

        if args.model == 'edsr':
            edsr_weight_path = os.path.join(
                args.weights_path,
                '{}_Epoch_{}.pkl'.format(edsr.__class__.__name__,
                                         args.num_epochs))
            edsr.load_state_dict(torch.load(edsr_weight_path))
            inference(val_div2k_loader,
                      edsr,
                      args.upscale_factor,
                      args.num_epochs,
                      args.inference_path,
                      device,
                      save_combined=False)

    elif args.phase == 'generate':
        generate_all(val_div2k_loader, device, args)

    else:
        raise NotImplementedError
Code Example #22
File: main_MIDL.py Project: wyk0517/SizeLoss_WSS
def runTraining():
    print('-' * 40)
    print('~~~~~~~~  Starting the training... ~~~~~~')
    print('-' * 40)

    # Batch size for training MUST be 1 in weakly/semi supervised learning if we want to impose constraints.
    batch_size = 1
    batch_size_val = 1
    lr = 0.0005
    epoch = 1000

    root_dir = './ACDC-2D-All'
    model_dir = 'model'

    transform = transforms.Compose([transforms.ToTensor()])

    mask_transform = transforms.Compose([transforms.ToTensor()])

    train_set = medicalDataLoader.MedicalImageDataset(
        'train',
        root_dir,
        transform=transform,
        mask_transform=mask_transform,
        augment=False,
        equalize=False)

    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              num_workers=5,
                              shuffle=False)

    val_set = medicalDataLoader.MedicalImageDataset(
        'val',
        root_dir,
        transform=transform,
        mask_transform=mask_transform,
        equalize=False)

    val_loader = DataLoader(val_set,
                            batch_size=batch_size_val,
                            num_workers=5,
                            shuffle=False)

    minVal = 97.9
    maxVal = 1722.6
    minSize = torch.FloatTensor(1)
    minSize.fill_(np.int64(minVal).item())
    maxSize = torch.FloatTensor(1)
    maxSize.fill_(np.int64(maxVal).item())

    print("~~~~~~~~~~~ Creating the model ~~~~~~~~~~")
    num_classes = 2

    netG = ENet(1, num_classes)

    netG.apply(weights_init)
    softMax = nn.Softmax()
    Dice_loss = computeDiceOneHotBinary()

    modelName = 'WeaklySupervised_CE-2_b'

    print(' Model name: {}'.format(modelName))
    partial_ce = Partial_CE()
    mil_loss = MIL_Loss()
    size_loss = Size_Loss()

    if torch.cuda.is_available():
        netG.cuda()
        softMax.cuda()
        Dice_loss.cuda()

    optimizerG = torch.optim.Adam(netG.parameters(), lr=lr, betas=(0.5, 0.999))

    BestDice, BestEpoch = 0, 0

    dBAll = []
    Losses = []

    annotatedPixels = 0
    totalPixels = 0

    print(" ~~~~~~~~~~~ Starting the training ~~~~~~~~~~")
    print(' --------- Params: ---------')
    print(' - Lower bound: {}'.format(minVal))
    print(' - Upper bound: {}'.format(maxVal))
    for i in range(epoch):
        netG.train()
        lossVal = []
        lossVal1 = []

        totalImages = len(train_loader)
        for j, data in enumerate(train_loader):
            image, labels, weak_labels, img_names = data

            # prevent batchnorm error for batch of size 1
            if image.size(0) != batch_size:
                continue

            optimizerG.zero_grad()
            netG.zero_grad()

            MRI = to_var(image)
            Segmentation = to_var(labels)
            weakAnnotations = to_var(weak_labels)

            segmentation_prediction = netG(MRI)

            annotatedPixels = annotatedPixels + weak_labels.sum()
            totalPixels = totalPixels + weak_labels.shape[2] * weak_labels.shape[3]
            temperature = 0.1
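            # a low softmax temperature sharpens the predicted class probabilities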
            predClass_y = softMax(segmentation_prediction / temperature)
            Segmentation_planes = getOneHot_Encoded_Segmentation(Segmentation)
            segmentation_prediction_ones = predToSegmentation(predClass_y)

            # lossCE_numpy = partial_ce(segmentation_prediction, Segmentation_planes, weakAnnotations)
            lossCE_numpy = partial_ce(predClass_y, Segmentation_planes,
                                      weakAnnotations)

            # sizeLoss_val = size_loss(segmentation_prediction, Segmentation_planes, Variable(minSize), Variable(maxSize))
            sizeLoss_val = size_loss(predClass_y, Segmentation_planes,
                                     Variable(minSize), Variable(maxSize))

            # MIL_Loss_val = mil_loss(predClass_y, Segmentation_planes)

            # Dice loss (ONLY USED TO COMPUTE THE DICE. This DICE loss version does not work)
            DicesN, DicesB = Dice_loss(segmentation_prediction_ones,
                                       Segmentation_planes)
            DiceN = DicesToDice(DicesN)
            DiceB = DicesToDice(DicesB)

            Dice_score = (DiceB + DiceN) / 2

            # Choose between the different models
            # lossG = lossCE_numpy + MIL_Loss_val
            lossG = lossCE_numpy + sizeLoss_val
            # lossG = lossCE_numpy
            # lossG = sizeLoss_val

            lossG.backward(retain_graph=True)
            optimizerG.step()

            lossVal.append(lossG.data[0])
            lossVal1.append(lossCE_numpy.data[0])

            printProgressBar(
                j + 1,
                totalImages,
                prefix="[Training] Epoch: {} ".format(i),
                length=15,
                suffix=" Mean Dice: {:.4f}, Dice1: {:.4f} ".format(
                    Dice_score.data[0], DiceB.data[0]))

        deepSupervision = False
        printProgressBar(
            totalImages,
            totalImages,
            done=
            f"[Training] Epoch: {i}, LossG: {np.mean(lossVal):.4f}, lossMSE: {np.mean(lossVal1):.4f}"
        )

        Losses.append(np.mean(lossVal))
        d1, sizeGT, sizePred = inference(netG, temperature, val_loader,
                                         batch_size, i, deepSupervision,
                                         modelName, minVal, maxVal)

        dBAll.append(d1)

        directory = 'Results/Statistics/MIDL/' + modelName
        if not os.path.exists(directory):
            os.makedirs(directory)

        np.save(os.path.join(directory, modelName + '_Losses.npy'), Losses)
        np.save(os.path.join(directory, modelName + '_dBAll.npy'), dBAll)

        currentDice = d1

        print(" [VAL] DSC: (1): {:.4f} ".format(d1))
        # saveImagesSegmentation(netG, val_loader_save_imagesPng, batch_size_val_savePng, i, 'test', False)

        if currentDice > BestDice:
            BestDice = currentDice
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
            torch.save(netG,
                       os.path.join(model_dir, "Best_" + modelName + ".pkl"))

        if i % (BestEpoch + 10):
            for param_group in optimizerG.param_groups:
                param_group['lr'] = lr
Code Example #23
File: train.py Project: mandiehyewon/CS576
            y = y.to(device)

            logit = model(x)
            loss_xy, loss_wh, loss_obj, loss_noobj, loss_class = compute_loss(
                logit, y)
            loss = (lambda_coord * (loss_xy + loss_wh) + loss_obj +
                    lambda_noobj * loss_noobj + loss_class) / batch_size
            valid_loss += loss

        valid_loss /= len(test_dloader)

    ckpt = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch
    }
    torch.save(ckpt, ckpt_path)

VOC_CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
               'tvmonitor')

test_image_dir = 'test_images'
image_path_list = [
    os.path.join(test_image_dir, path) for path in os.listdir(test_image_dir)
]

for image_path in image_path_list:
    inference(model, image_path, device, VOC_CLASSES)
Code Example #24
File: inference.py Project: sinnlosses/lstm_vae
    word2id_fname = f"{model_dir}/word2id.p"
    config_json = f"{model_dir}/config.json"
    save_sample_fname = f"{model_dir}/sampling_{weights}.csv"

    with open(word2id_fname, "rb") as fi:
        word_to_id, is_reversed = pickle.load(fi)
    id_to_word = {i: w for w, i in word_to_id.items()}

    with open(config_json, "r") as fi:
        config = json.load(fi)
    with open(model_fname, "r") as fi:
        model_json = fi.read()
    gen_model = model_from_json(model_json)
    gen_model.load_weights(weights_fname)

    n_samples = 100
    maxlen = int(config["maxlen"])
    latent_dim = int(config["latent_dim"])

    print('----- Generating text -----')
    surface_morph = []
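    # sample n_samples sentences from the generator and collect surface/morph pairs for the CSV output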
    for n_sample in range(n_samples):
        sent_surface, sent_morph = inference(gen_model, maxlen, latent_dim,
                                             word_to_id, id_to_word,
                                             is_reversed)
        print(sent_surface)
        surface_morph.append([sent_surface, sent_morph])
    with open(save_sample_fname, "w") as fo:
        writer = csv.writer(fo)
        writer.writerows(surface_morph)