Exemple #1
0
def run():
    """Evaluate a saved ResNet-18 on the test set and write top-5 predictions.

    Writes one line per test image to ``{dir_path}/results-{model_name}.txt``
    in the form ``test/<8-digit-id>.jpg c1 c2 c3 c4 c5``.
    """
    # model_name = "baseline"
    model_name = "baseline_bs_20_lr_1e-2_adagrad"
    # model_name = "baseline_bs_20_lr_1e-2_ep_8"
    # model_name = "baseline_bs_20_lr_1e-2_ep_7"
    # Parameters
    batch_size = 100

    # setup the device for running
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = resnet_18()
    # map_location lets a CUDA-saved checkpoint load on a CPU-only machine
    # (without it, torch.load raises when CUDA is unavailable).
    model.load_state_dict(torch.load(
        "models/good_models/{model_name}".format(model_name=model_name),
        map_location=device))
    model = model.to(device)

    test_loader = dataset.get_test_loader(batch_size)
    num_test_batches = len(test_loader)

    model.eval()
    eval_filename = "{dir}/results-{model_name}.txt".format(dir=dir_path, model_name=model_name)
    with torch.no_grad():
        with open(eval_filename, "w") as file:
            img_id = 0
            for batch_num, (inputs, labels) in enumerate(test_loader, 1):
                inputs = inputs.to(device)
                outputs = model(inputs)
                # Indices (not values) of the 5 highest-scoring classes per image.
                top5 = outputs.topk(5)[1]

                for i in range(inputs.size(0)):
                    img_id += 1
                    row = top5[i]
                    # .item() guarantees plain Python ints are written,
                    # independent of how the torch version formats 0-dim tensors.
                    file.write("test/{name}.jpg {first} {second} {third} {fourth} {fifth}\n".format(
                        name=str(img_id).zfill(8),
                        first=row[0].item(), second=row[1].item(), third=row[2].item(),
                        fourth=row[3].item(), fifth=row[4].item()))

                # Progress as a fraction of processed batches.
                print(batch_num * 1.0 / num_test_batches)
Exemple #2
0
def predict(args, model):
    """Entrypoint for predict mode.

    Runs inference over the test and validation loaders and pickles a nested
    dict ``{split: {sample_name: [per-item outputs]}}`` to
    ``args.load + '.output' + args.pred_suffix``.
    """

    test_loader = dataset.get_test_loader(args)
    # Only the validation loader is needed here; predict=True presumably
    # switches off training-time augmentation — TODO confirm in dataset module.
    _, val_loader = dataset.get_train_val_loader(args, predict=True)

    if args.fp16:
        model = amp.initialize(model, opt_level='O1')

    logging.info('Starting prediction')

    output = {}
    for k, loader in [('test', test_loader), ('val', val_loader)]:
        output[k] = {}
        res = infer(args, model, loader)

        for i, v in res.items():
            d = loader.dataset.data[i]
            name = '{}_{}_{}'.format(d[0], d[1], d[2])
            # Group multiple predictions belonging to the same sample name.
            output[k].setdefault(name, []).append(v)

    # Build the output path once instead of repeating the concatenation.
    out_path = args.load + '.output' + args.pred_suffix
    logging.info('Saving predictions to {}'.format(out_path))
    with open(out_path, 'wb') as file:
        pickle.dump(output, file)
Exemple #3
0
def test():
    """Run the generator over the test set and save output + ground-truth JPEGs.

    Outputs go to ./results/ as NNNN.jpg (prediction) and NNNN_gt.jpg.
    """

    input_image = tf.placeholder(tf.float32, [None, None, None, 3])
    result_image = inference(input_image, name='generator')

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.99)
    saver = tf.train.Saver()

    if not os.path.exists('results'):
        os.mkdir('results')

    # Context manager guarantees the session is closed even on error
    # (the original leaked it).
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        with tf.device('/device:GPU:0'):

            data_dir = 'data_loacation_in_your_computer'
            dataloader = get_test_loader(data_dir)

            sess.run(tf.global_variables_initializer())
            saver.restore(sess, tf.train.latest_checkpoint('saved_models'))

            for idx, batch in tqdm(enumerate(dataloader)):
                result = sess.run([result_image], feed_dict={input_image: batch[0]})
                # Model output assumed in [0, 1] — TODO confirm; scale to 8-bit range.
                result = np.squeeze(result) * 255
                result = np.clip(result, 0, 255).astype(np.float32)
                ground_truth = np.squeeze(batch[1])
                save_out_path = os.path.join('results', '{}.jpg'.format(str(idx).zfill(4)))
                save_gt_path = os.path.join('results', '{}_gt.jpg'.format(str(idx).zfill(4)))
                cv2.imwrite(save_out_path, result)
                cv2.imwrite(save_gt_path, ground_truth)
Exemple #4
0
def test():
    """Evaluate the saved model on the test set, printing running accuracy."""
    # setup the device for running
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Net()
    # map_location lets a CUDA-saved checkpoint load on a CPU-only machine.
    model.load_state_dict(torch.load(path + "_" + str(learning_rate),
                                     map_location=device))
    model = model.to(device)

    test_loader = dataset.get_test_loader(batch_size)
    num_test_batches = len(test_loader)

    correct = 0
    total = 0
    model.eval()
    with torch.no_grad():
        for batch_num, (inputs, labels) in enumerate(test_loader, 1):
            # BUG FIX: the original used .view(N, C, H, W) to "convert" NHWC
            # to NCHW, but view only reshapes memory and scrambles pixel data.
            # permute actually reorders the axes.
            inputs = inputs.permute(0, 3, 1, 2)
            inputs, labels = inputs.to(device), labels.to(device)
            print("test batch_num: ", batch_num)

            outputs = model(inputs).to(device)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            accuracy = (100 * correct / total)

            if batch_num % test_output_period == 0:
                print('[%d:%.2f] accuracy: %d %%' %
                      (1, batch_num * 1.0 / num_test_batches, accuracy))
Exemple #5
0
def predict(args, model):
    """Entrypoint for predict mode.

    Runs inference over the test and validation loaders and pickles a nested
    dict ``{split: {sample_name: [per-item outputs]}}`` to
    ``args.load + ".output" + args.pred_suffix``.
    """

    test_loader = dataset.get_test_loader(args)
    # Only the validation loader is needed here; predict=True presumably
    # switches off training-time augmentation — TODO confirm in dataset module.
    _, val_loader = dataset.get_train_val_loader(args, predict=True)

    if args.fp16:
        model = amp.initialize(model, opt_level="O1")

    logging.info("Starting prediction")

    output = {}
    for k, loader in [("test", test_loader), ("val", val_loader)]:
        output[k] = {}
        res = infer(args, model, loader)

        for i, v in res.items():
            d = loader.dataset.data[i]
            name = "{}_{}_{}".format(d[0], d[1], d[2])
            # Group multiple predictions belonging to the same sample name.
            output[k].setdefault(name, []).append(v)

    # Build the output path once instead of repeating the concatenation.
    out_path = args.load + ".output" + args.pred_suffix
    logging.info("Saving predictions to {}".format(out_path))
    with open(out_path, "wb") as file:
        pickle.dump(output, file)
Exemple #6
0
 def __init__(self, input_csv, model_dirs, output_csv='./results/results.csv'):
     """Set up an ensemble predictor.

     Loads every model found under *model_dirs*, builds a test dataloader
     from *input_csv*, and moves the ensemble onto GPU when available.
     """
     self.model = load_ensemble_from_dirs(model_dirs)
     self.model_dirs = model_dirs
     self.output_csv = output_csv
     self.input_csv = input_csv
     self.dataloader = get_test_loader(input_csv)
     self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
     self.model.to(self.device)
     # Populated later by the prediction / post-processing steps.
     self.raw_results = None
     self.result_df = None
Exemple #7
0
                               lr=opt.learning_rate,
                               weight_decay=opt.weight_decay)
        # Binary cross-entropy on the GPU — per the compute_iou call below,
        # this is a segmentation-mask task.
        criterion = nn.BCELoss().cuda()
        # start to run a training
        run_train(model, train_loader, val_loader, opt, criterion)
        # make prediction on validation set
        predictions, img_ids = run_test(model, val_loader, opt)
        # compute IOU between prediction and ground truth masks
        compute_iou(predictions, img_ids, val_loader)
        # SAVE model
        if opt.save_model:
            torch.save(model.state_dict(),
                       os.path.join(opt.checkpoint_dir, 'model-01.pt'))
    else:
        # load testing data for making predictions
        test_loader = get_test_loader(opt.test_dir,
                                      batch_size=opt.batch_size,
                                      shuffle=opt.shuffle,
                                      num_workers=opt.num_workers,
                                      pin_memory=opt.pin_memory)
        # load the model and run test
        model.load_state_dict(
            torch.load(os.path.join(opt.checkpoint_dir, 'model-01.pt')))
        # Wrapping in DataParallel *after* load_state_dict means the saved
        # keys need no 'module.' prefix.
        if opt.n_gpu > 1:
            model = nn.DataParallel(model)
        if opt.is_cuda:
            model = model.cuda()
        predictions, img_ids = run_test(model, test_loader, opt)
        # run length encoding and save as csv
        encode_and_save(predictions, img_ids)
Exemple #8
0
        raise ValueError("Missing configuration file ...")
    else:
        # Load run configuration (paths, attack hyper-parameters) from JSON.
        with open(args.config_file) as config_file:
            config = json.load(config_file)

    data_dir = config['data_dir']

    # Set up GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = str(config['gpu_id'])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Set up model (eval mode: we only generate adversarial examples).
    model = get_resnet18().to(device)
    model.eval()

    test_loader = get_test_loader(data_dir=data_dir, batch_size=50)
    # ART classifier wrapper; input_shape/nb_classes suggest CIFAR-10-sized
    # data — TODO confirm against the dataset used.
    classifier = PyTorchClassifier(
        model=model,
        loss=nn.CrossEntropyLoss(),
        input_shape=(3, 32, 32),
        nb_classes=10,
        optimizer=None,
        clip_values=(0, 1),
    )

    # L-infinity PGD attack configured entirely from the JSON file.
    attack = ProjectedGradientDescentPyTorch(
        estimator=classifier,
        norm=np.inf,
        eps=config['epsilon'],
        eps_step=config['step_size'],
        max_iter=config['num_steps'],
def test_it(MODEL_PATH='roberta-base'):
    """Ensemble inference over all checkpoints under ./type/<t>/ and write submission.csv.

    Note: *MODEL_PATH* only selects the tokenizer for get_test_loader; each
    sub-model is constructed with its directory name ``t`` as its own
    MODEL_PATH.
    """
    models = []
    for t in os.listdir('type'):
        for model_file in os.listdir(os.path.join('type', t)):
            model = TweetModel(MODEL_PATH=t)
            # model.cuda()
            # Models are parked on CPU and moved to GPU one at a time in the
            # inference loop below — presumably to bound GPU memory.
            model.cpu()
            model.load_state_dict(
                torch.load(os.path.join(os.path.join('type', t), model_file)))
            model.eval()
            models.append(model)

    test_df = pd.read_csv('data/test.csv')
    test_df['text'] = test_df['text'].astype(str)
    test_loader = get_test_loader(test_df, MODEL_PATH=MODEL_PATH)
    predictions = []

    for data in test_loader:
        ids = data['ids'].cuda()
        masks = data['masks'].cuda()
        tweet = data['tweet']
        offsets = data['offsets'].numpy()
        sentiment = data['sentiment']

        start_logits = []
        end_logits = []
        # len_logits = []
        for model in models:
            with torch.no_grad():
                model.cuda()
                output = model(ids, masks)
                start_logits.append(
                    torch.softmax(output[0], dim=1).cpu().detach().numpy())
                end_logits.append(
                    torch.softmax(output[1], dim=1).cpu().detach().numpy())
                # len_logits.append(torch.softmax(output[2], dim=1).cpu().detach().numpy())
                model.cpu()
        # Ensemble by averaging softmax probabilities across models.
        start_logits = np.mean(start_logits, axis=0)
        end_logits = np.mean(end_logits, axis=0)
        # len_logits = np.mean(len_logits, axis=0)
        for i in range(len(ids)):
            start_pred = np.argmax(start_logits[i])
            end_pred = np.argmax(end_logits[i])
            # length = np.argmax(len_logits[i])
            # end_pred = start_pred + int(length)
            sentiment_val = sentiment[i]
            original_tweet = tweet[i]
            # Fall back to the whole tweet on an inverted span, for neutral
            # sentiment, or for very short tweets.
            if start_pred > end_pred:
                pred = original_tweet
            else:
                pred = get_selected_text(tweet[i], start_pred, end_pred,
                                         offsets[i])
            if sentiment_val == "neutral" or len(original_tweet.split()) < 2:
                pred = original_tweet
            predictions.append(pred)

    sub_df = pd.read_csv('data/sample_submission.csv')
    sub_df['selected_text'] = predictions
    # Post-processing hacks for single-word predictions.
    sub_df['selected_text'] = sub_df['selected_text'].apply(
        lambda x: x.replace('!!!!', '!') if len(x.split()) == 1 else x)
    sub_df['selected_text'] = sub_df['selected_text'].apply(
        lambda x: x.replace('..', '.') if len(x.split()) == 1 else x)
    # NOTE(review): because '..' was already collapsed above, no '...' can
    # remain when this runs — this replacement is effectively dead. Verify
    # the intended ordering.
    sub_df['selected_text'] = sub_df['selected_text'].apply(
        lambda x: x.replace('...', '.') if len(x.split()) == 1 else x)
    sub_df.to_csv('submission.csv', index=False)
    sub_df.head()
def check_validation(CropStage=False,
                     TestStage=True,
                     chooseTH=True,
                     toMask=True):
    """Validation pipeline for a U-Net segmentation model.

    Predicts on held-out crops, optionally searches for the best
    probability/pixel thresholds, converts probabilities to binary masks,
    then scores per-image dice and visualizes the best/worst 10 cases.
    The boolean flags enable/disable the individual stages.
    """
    root_path = '/media/totem_disk/totem/weitang/MyProject'
    #
    model = smp.Unet('resnet34', activation=None).cuda()
    # model = U_Net2(img_ch=3, output_ch=1).cuda()
    # dir_model = root_path + '/U_Net2-30-0.1000-5-0.2494.pkl'
    dir_model = root_path + '/unet_resnet34_1_1_best.pth'
    model.load_state_dict(torch.load(dir_model)['state_dict'])
    # model.load_state_dict(torch.load(dir_model))

    # Split the PNGs into image files and their '*mask*' counterparts.
    dataset_path = '/media/totem_disk/totem/weitang/data_handlabel/sample_abnormal_256'
    path_list = glob.glob(dataset_path + '/*png')
    images_path_list = copy.deepcopy(path_list)
    masks_path_list = []
    for p in path_list:
        if 'mask' in p:
            masks_path_list.append(p)
            images_path_list.remove(p)

    # Hard-coded validation slide ids; everything else is training data.
    val_name_list = ['1024531', '1024624', '1023965']
    val_files = find_image(images_path_list, val_name_list)
    val_masks_files = find_image(masks_path_list, val_name_list)
    for i in range(len(val_files)):
        images_path_list.remove(val_files[i])
        masks_path_list.remove(val_masks_files[i])
    test_path_list = val_files
    # Paths of the cropped test set.
    print("Total {} images for testing".format(len(test_path_list)))

    prob_save_path = root_path + '/temp_data/prob'
    crop_predict = root_path + '/temp_data/crop_predict'
    if TestStage == True:
        print("Stage 1: ")
        #predict cropped images
        os.makedirs(crop_predict, exist_ok=True)
        test_loader = get_test_loader(test_path_list,
                                      image_size=512,
                                      batch_size=1)
        test(test_loader, crop_predict, model=model)

    mask_save_path = root_path + '/temp_data/mask/'
    mask_ori_path = val_masks_files
    if chooseTH == True:
        # Grid-search thresholds against ground-truth masks.
        bestTH, pixelTH, dice_score = choose_threshold(crop_predict,
                                                       mask_ori_path)
    else:
        bestTH = 0.05
        pixelTH = 5
    if toMask == True:
        print("Stage 4: ")
        #convert probs to masks
        # NOTE(review): test() wrote predictions into crop_predict, but this
        # reads prob_save_path — confirm something else populates
        # temp_data/prob, otherwise this stage operates on stale data.
        os.makedirs(mask_save_path, exist_ok=True)
        prob_to_mask(prob_save_path,
                     mask_save_dir=mask_save_path,
                     th=bestTH,
                     pixelth=pixelTH)

    # Rank images by dice and visualize the 10 worst and 10 best.
    dice = calculate_dice(glob.glob(mask_save_path + '*png'), mask_ori_path)
    dice_sorted = sorted(dice.items(), key=lambda item: item[1])
    sorted_name = []
    for i in dice_sorted:
        sorted_name.append(i[0])
    lowest10 = sorted_name[:10]
    highest10 = sorted_name[-10:]
    image_dir = '/media/totem_disk/totem/weitang/competition/trainData/image'
    visualize(lowest10,
              image_dir,
              mask_ori_path,
              mask_save_path,
              save_name='lowest_dice',
              dice=dice)
    visualize(highest10,
              image_dir,
              mask_ori_path,
              mask_save_path,
              save_name='highest_dice',
              dice=dice)
def predict_test(CropStage=False,
                 TestStage=True,
                 toMask=True,
                 toZip=True,
                 Statistic=True,
                 newTH=0.05):
    """Test-time pipeline: crop whole-slide images, predict, binarize, zip results.

    The boolean flags enable/disable the individual stages; *newTH* is the
    probability threshold used when converting probabilities to masks.
    """

    root_path = '/media/totem_disk/totem/weitang/MyProject'
    #
    model = smp.Unet('resnet34', activation=None).cuda()
    dir_model = root_path + '/unet_resnet34_1_1_best.pth'
    model.load_state_dict(torch.load(dir_model)['state_dict'])
    val_name_list = ['1024531', '1024624', '1023965']
    svs_list = glob.glob('/media/totem_disk/totem/weitang/data_handlabel/*svs')
    test_path_list = find_image(svs_list, val_name_list)
    crop_images_path = '/media/totem_disk/totem/weitang/MyProject/temp_data/crop_image'

    print("Total {} images for testing".format(len(test_path_list)))
    # Cropping stage.
    if CropStage == True:
        print("Stage 1: ")
        #crop images
        crop_svs(test_path_list, crop_images_path, image_size=512, step=256)
    crop_images_path_list = glob.glob(
        '/media/totem_disk/totem/weitang/MyProject/temp_data/crop_image/*png')

    crop_predict = root_path + '/temp_data_test/crop_predict'
    if TestStage == True:
        print("Stage 2: ")
        #predict cropped images
        test_images_path_list = crop_images_path_list
        os.makedirs(crop_predict, exist_ok=True)
        test_loader = get_test_loader(test_images_path_list,
                                      image_size=512,
                                      batch_size=4)
        test(test_loader, crop_predict, model=model)

    mask_save_path = root_path + '/temp_data_test/mask/'
    if toMask == True:
        print("Stage 4: ")
        #convert probs to masks
        # NOTE(review): BUG — prob_save_path is never defined in this
        # function, so this raises NameError when toMask=True (the default).
        # It is probably meant to be either crop_predict or a
        # temp_data_test/prob directory — confirm against check_validation.
        os.makedirs(mask_save_path, exist_ok=True)
        prob_to_mask(prob_save_path,
                     mask_save_dir=mask_save_path,
                     th=newTH,
                     pad_white=True)

    # Overlay predicted masks on the source crops for visual inspection.
    crop_preds_path_list = glob.glob(
        '/media/totem_disk/totem/weitang/MyProject/temp_data_test/crop_predict/*png'
    )
    crop_pred_toimage_path = '/media/totem_disk/totem/weitang/MyProject/temp_data_test/crop_predict_image'
    mask_to_image(crop_preds_path_list,
                  image_dir=crop_images_path,
                  save_dir=crop_pred_toimage_path)
    data_xml_list = glob.glob(
        r'/media/totem_disk/totem/weitang/data_handlabel/*.xml')
    if Statistic == True:
        result = statistic(crop_preds_path_list, data_xml_list, val_name_list)
        print(result)
    if toZip == True:
        print("Stage 5: ")
        #zip masks
        zf = zipfile.ZipFile(f'{root_path}/result/result.zip', 'w')
        for i in glob.glob(f"{mask_save_path}/*.png"):
            basename = os.path.split(i)[1]
            zf.write(i, f'result/{basename}')
        zf.close()
# Qualitative check: print a few generated vs. ground-truth captions.
for i, (xs, ys) in enumerate(tester):
    xs, ys = xs.cuda(), ys.cuda()
    # schedule=1.0 — presumably full teacher forcing; confirm in model.forward.
    _, out = model(xs, ys, schedule=1.0)
    #out = model.batch_beam(xs,beam_num)
    if i % 3 == 1:
        print('gen/true', i, ':')
        # Sample 3 random items from the batch for display.
        idxLis = np.random.choice(len(out), 3)
        for gen, truth in zip(
                utils.resolve_caption(out[idxLis], name, False, True, True),
                utils.resolve_caption(ys[idxLis], name, False, False, True)):
            print("gen: ", gen)
            print('truth: ', truth)

# Mid-file imports kept in place so the script's execution order is unchanged.
import BLEU
import dataset
# Eval
print('BLEU:')
total, score = 0, 0
bleu = BLEU.BLEU(name)
evalTester = dataset.get_test_loader(name, batch_size)
for i, (idLis, xs) in enumerate(evalTester):
    xs = xs.cuda()
    _, ys = model(xs, None, schedule=1)
    #ys = model.batch_beam(xs,beam_num)
    # NOTE(review): the inner loop rebinds `i` (the caption id from idLis),
    # shadowing the outer batch index — works, but easy to misread.
    for i, y in zip(idLis, ys.cpu()):
        score += bleu(i, torch.argmax(y, 1))
        #score += bleu(i, y)
        total += 1
# Mean BLEU over all evaluated captions.
score /= total
print(score)
Exemple #13
0
                    help='number of model output channels')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--epoch',
                    type=int,
                    default=24,
                    help='start number of epochs to train for')
parser.add_argument('--checkpoint_dir',
                    default='checkpoints',
                    help="path to saved models (to continue training)")

args = parser.parse_args()

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
net = Model(args.num_classes)

data_loader = get_test_loader(args.data_dir, args.image_size, args.batch_size)

# Load the checkpoint named after the epoch number; map_location lets a
# CUDA-saved checkpoint load on a CPU-only host.
net.load_state_dict(
    torch.load(os.path.join(f'{args.checkpoint_dir}', f'{args.epoch}.pth'),
               map_location=device))
# NOTE(review): the output file is never explicitly closed; a `with` block
# would guarantee the buffer is flushed.
file = open('output.csv', 'w')
file.write('id,label\n')

for indexs, images in tqdm(data_loader):
    with torch.no_grad():
        images: torch.Tensor = images.to(device)
        indexs: torch.Tensor = indexs
        preds = net.predict_image(images)

        # One CSV row per image: index, predicted class id.
        for i in range(len(indexs)):
            file.write(f'{indexs[i]},{preds[i].item()}\n')
Exemple #14
0
def train(args, model):
    """Train *model* with optional Horovod distribution, AMP fp16, and pseudo-labeling.

    Validates after every epoch, saves a per-epoch checkpoint, and keeps the
    best-scoring weights at args.save. Returns early in 'val' mode.
    """
    train_loader, val_loader = dataset.get_train_val_loader(args)

    # lr=0 is a placeholder; the real rate is set per-iteration below.
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0,
                                 weight_decay=args.wd)

    if args.horovod:
        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            backward_passes_per_step=args.gradient_accumulation)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)

    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

    # Score a loaded checkpoint first so it isn't overwritten by a worse model.
    if args.load is not None:
        best_acc = score(args, model, val_loader)
    else:
        best_acc = float('-inf')

    if args.mode == 'val':
        return

    if args.pl_epoch is not None:
        test_loader = dataset.get_test_loader(args, exclude_leak=True)
        pl_data = set()

    for epoch in range(args.start_epoch, args.epochs):
        if args.pl_epoch is not None:
            pseudo_label(args, epoch, pl_data, model, val_loader, test_loader,
                         train_loader)

        with torch.no_grad():
            avg_norm = np.mean([v.norm().item() for v in model.parameters()])

        logging.info('Train: epoch {}   avg_norm: {}'.format(epoch, avg_norm))

        model.train()
        optimizer.zero_grad()

        cum_loss = 0
        cum_acc = 0
        cum_count = 0
        tic = time.time()
        for i, (X, S, _, Y) in enumerate(train_loader):
            # Fractional-epoch learning-rate schedule, applied every step.
            lr = get_learning_rate(args, epoch + i / len(train_loader))
            for g in optimizer.param_groups:
                g['lr'] = lr

            X = X.cuda()
            S = S.cuda()
            Y = Y.cuda()
            X, S, Y = transform_input(args, X, S, Y)

            loss, acc = model.train_forward(X, S, Y)
            if args.fp16:
                '''
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                    optimizer.synchronize()

                if (i + 1) % args.gradient_accumulation == 0:
                    with optimizer.skip_synchronize():
                        optimizer.step()
                    optimizer.zero_grad()
                '''
                # Horovod + AMP + gradient accumulation: only synchronize the
                # allreduce on steps where gradients are actually applied.
                # hasattr guards keep this working with a plain optimizer too.
                apply_grads = (i + 1) % args.gradient_accumulation == 0
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                    if hasattr(optimizer, "synchronize") and apply_grads:
                        optimizer.synchronize()
                if apply_grads:
                    if hasattr(optimizer, "skip_synchronize"):
                        with optimizer.skip_synchronize():
                            optimizer.step()
                    optimizer.zero_grad()
            else:
                loss.backward()
                if (i + 1) % args.gradient_accumulation == 0:
                    optimizer.step()
                    optimizer.zero_grad()

            cum_count += 1
            cum_loss += loss.item()
            cum_acc += acc
            if (i + 1) % args.disp_batches == 0:
                logging.info(
                    'Epoch: {:3d} Iter: {:4d}  ->  speed: {:6.1f}   lr: {:.9f}   loss: {:.6f}   acc: {:.6f}'
                    .format(epoch, i + 1,
                            cum_count * args.batch_size / (time.time() - tic),
                            optimizer.param_groups[0]['lr'],
                            cum_loss / cum_count, cum_acc / cum_count))
                cum_loss = 0
                cum_acc = 0
                cum_count = 0
                tic = time.time()

        # End of epoch: validate, checkpoint, and track the best model.
        acc = score(args, model, val_loader)
        torch.save(model.state_dict(), str(args.save + '.{}'.format(epoch)))
        if acc >= best_acc:
            best_acc = acc
            logging.info('Saving best to {} with score {}'.format(
                args.save, best_acc))
            torch.save(model.state_dict(), str(args.save))
Exemple #15
0
def train(args, model):
    """Train *model* with optional AMP fp16 and pseudo-labeling.

    Validates after every epoch, saves a per-epoch checkpoint, and keeps the
    best-scoring weights at args.save. Returns early in 'val' mode.
    """
    train_loader, val_loader = dataset.get_train_val_loader(args)

    # lr=0 is a placeholder; the real rate is set per-iteration below.
    optimizer = torch.optim.Adam(model.parameters(), lr=0, weight_decay=args.wd)

    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    # Score a loaded checkpoint first so it isn't overwritten by a worse model.
    if args.load is not None:
        best_acc = score(args, model, val_loader)
    else:
        best_acc = float("-inf")

    if args.mode == "val":
        return

    if args.pl_epoch is not None:
        test_loader = dataset.get_test_loader(args, exclude_leak=True)
        pl_data = set()

    for epoch in range(args.start_epoch, args.epochs):
        if args.pl_epoch is not None:
            pseudo_label(
                args, epoch, pl_data, model, val_loader, test_loader, train_loader
            )

        with torch.no_grad():
            avg_norm = np.mean([v.norm().item() for v in model.parameters()])

        logging.info("Train: epoch {}   avg_norm: {}".format(epoch, avg_norm))

        # (removed leftover debug print of torch.is_grad_enabled())
        model.train()
        optimizer.zero_grad()

        cum_loss = 0
        cum_acc = 0
        cum_count = 0
        tic = time.time()
        for i, (X, S, _, Y) in enumerate(train_loader):
            # Fractional-epoch learning-rate schedule, applied every step.
            lr = get_learning_rate(args, epoch + i / len(train_loader))
            for g in optimizer.param_groups:
                g["lr"] = lr

            X = X.cuda()
            S = S.cuda()
            Y = Y.cuda()
            X, S, Y = transform_input(args, X, S, Y)

            loss, acc = model.train_forward(X, S, Y)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            # Apply accumulated gradients every gradient_accumulation steps.
            if (i + 1) % args.gradient_accumulation == 0:
                optimizer.step()
                optimizer.zero_grad()

            cum_count += 1
            cum_loss += loss.item()
            cum_acc += acc
            if (i + 1) % args.disp_batches == 0:
                logging.info(
                    "Epoch: {:3d} Iter: {:4d}  ->  speed: {:6.1f}   lr: {:.9f}   loss: {:.6f}   acc: {:.6f}".format(
                        epoch,
                        i + 1,
                        cum_count * args.batch_size / (time.time() - tic),
                        optimizer.param_groups[0]["lr"],
                        cum_loss / cum_count,
                        cum_acc / cum_count,
                    )
                )
                cum_loss = 0
                cum_acc = 0
                cum_count = 0
                tic = time.time()

        # End of epoch: validate, checkpoint, and track the best model.
        acc = score(args, model, val_loader)
        torch.save(model.state_dict(), str(args.save + ".{}".format(epoch)))
        if acc >= best_acc:
            best_acc = acc
            logging.info("Saving best to {} with score {}".format(args.save, best_acc))
            torch.save(model.state_dict(), str(args.save))