Example #1
def init_and_load(args, params, run_mode):
    checkpoints_path = helper.compute_paths(args, params)['checkpoints_path']
    optim_step = args.last_optim_step
    model = init_model(args, params)

    if run_mode == 'infer':
        model, _, _, _ = helper.load_checkpoint(checkpoints_path, optim_step, model, None, resume_train=False)
        print('In [init_and_load]: returned model for inference')
        return model

    else:  # train
        optimizer = optim.Adam(model.parameters(), lr=params['lr'])
        model, optimizer, _, lr = helper.load_checkpoint(checkpoints_path, optim_step, model, optimizer, resume_train=True)
        print('In [init_and_load]: returned model and optimizer for training')
        return model, optimizer, lr
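
The load_checkpoint helper this call site relies on takes a checkpoints directory, an optimizer step, a model, and an optional optimizer, and returns a (model, optimizer, loss, lr) tuple. A minimal sketch of that contract; the file-name pattern and checkpoint keys below are assumptions, not the original helper:

import os
import torch

def load_checkpoint(path_to_load, optim_step, model, optimizer=None, resume_train=True):
    # the 'optim_step=N.pt' file pattern and the key names are assumptions
    checkpoint = torch.load(os.path.join(path_to_load, f'optim_step={optim_step}.pt'),
                            map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    if resume_train and optimizer is not None:
        optimizer.load_state_dict(checkpoint['optim_state_dict'])
    # callers unpack four values: (model, optimizer, loss, lr)
    return model, optimizer, checkpoint.get('loss'), checkpoint.get('lr')
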
def main():
    input_args = get_predict_input_args()
    
    # Load checkpoint
    checkpoint, validation_accuracy = load_checkpoint(input_args.checkpoint_path)
        
    useGPU = input_args.gpu is not None
        
    # Build model
    model = build_model(checkpoint["arch"],
                        checkpoint["hidden_units_01"], 
                        checkpoint["hidden_units_02"], 
                        checkpoint)

    # Process image
    processed_image = process_image(input_args.image_path)
  
    # Predict topK
    topk = predict(processed_image, model, input_args.top_k, useGPU)
    
    # Show result
    with open(input_args.category_names_path, 'r') as f:
        cat_to_name = json.load(f)
    
    probs = topk[0][0].cpu().numpy()
    categories = [cat_to_name[str(category_index+1)] for category_index in topk[1][0].cpu().numpy()]
    
    for i in range(len(probs)):
        print("TopK {}, Probability: {}, Category: {}\n".format(i+1, probs[i], categories[i]))
Example #3
def predict(model_path, image_path):

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_checkpoint(model_path).to(device)

    img_transforms = transforms.Compose([
        transforms.Resize((224, 336)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    image = img_transforms(Image.open(image_path)).unsqueeze(0).to(device)

    model.eval()
    # forward pass to get the network output
    with torch.no_grad():
        output_preds = model(image)

    output_pred = output_preds.view(output_preds.size(0), -1)

    # move the prediction to the CPU and convert it to a scalar
    predicted_output = abs(float(output_pred.cpu().numpy()[0]))

    print('The predicted price of this shoe is ${} USD'.format(
        np.round(predicted_output, 2)))
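
Here load_checkpoint takes only a file path and returns a ready-to-use model. One common way to write that variant (the checkpoint keys are assumptions):

import torch

def load_checkpoint(model_path):
    checkpoint = torch.load(model_path, map_location='cpu')
    model = checkpoint['model']                      # full pickled model object (assumption)
    model.load_state_dict(checkpoint['state_dict'])  # key name is an assumption
    return model
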
def main():
    input_args = get_input_args()
    model, class_to_idx = helper.load_checkpoint(input_args.checkpoint)
    model.class_to_idx = class_to_idx
    #print(model)
    topk = input_args.top_k
    gpu = input_args.gpu
    image = input_args.image

    # TODO: which input args do I really need at a minimum?
    # The input image is processed and then fed to the model;
    # this is implemented inside the predict function.

    # Remarks about the predict function:
    # - it selects the device (gpu flag)
    # - the full path to the image has to go in
    # - it returns the classes and probabilities of the prediction
    [probs], classes = helper.predict(image, model, topk, gpu)

    cat_to_name = helper.load_categories(input_args.category_names)
    # Output result (print the classes and their probabilities)
    #print(cat_to_name, probs, classes)
    for a, b in zip(classes, probs):
        print("{} = {:.2f}%".format(cat_to_name[a], b * 100))
def main(args):

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    if 'vgg' in args.model_arch.lower():
        model = VGG(args.model_arch, True, args.dataset, 0, 3, 3)
    elif 'res' in args.model_arch.lower():
        model = ResNet_(args.model_arch, True, args.dataset, 0, 3, 3)
    else:
        raise ValueError(f'unsupported model_arch: {args.model_arch}')

    my_dataset = Cifar10(args)

    model = model.to(device)

    #if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)

    my_dataset.get_loaders()

    model_path = args.resume
    model, _, _ = helper.load_checkpoint(args,
                                         model,
                                         optimizer=None,
                                         path=None)

    criterion = nn.CrossEntropyLoss()
    top_1_acc, _, _ = trainer.validate(my_dataset.test_loader, model,
                                       criterion, args)
Example #6
def main():
    """
    Executing relevant functions
    """
    
    # Get Keyword Args for Prediction
    args = arg_parser()
    
    # Load categories to names json file
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)

    # Load model trained with train.py
    model = load_checkpoint(args.checkpoint)
    print(model)
    # Process Image
    image = process_image(args.image_path)
    
    # Check for GPU
    device = check_gpu(gpu_arg=args.gpu)
    
    # Use the processed image to predict the top K most likely classes
    top_ps, top_class = predict(image, model, args.top_k)
    
    # Print out probabilities
    print_probability(top_class, cat_to_name, top_ps)
Example #7
def predict(image_path, checkpoint, device, categories_to_names, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    with open(categories_to_names, 'r') as f:
        cat_to_name = json.load(f)
    # predict the class from the image file
    img = helper.process_image(image_path)
    model, class_to_idx = helper.load_checkpoint(checkpoint)

    img = torch.Tensor(img)

    #unsqueezing recommended on torch discussion forums for error i was having at:
    #https://discuss.pytorch.org/t/expected-stride-to-be-a-single-integer-value-or-a-list-of-1-values-to-match-the-convolution-dimensions-but-got-stride-1-1/17140
    img = img.unsqueeze(0)

    img = img.to(device)
    model = model.to(device)
    model.eval()
    with torch.no_grad():
        log_prob = model(img)
        probabilities = torch.exp(log_prob)
        probs, classes = probabilities.topk(topk, dim=1)
    classes = classes.cpu()
    classes = classes.numpy()
    classification = {
        number: string
        for string, number in class_to_idx.items()
    }
    class_names = [cat_to_name[classification[item]] for item in classes[0]]
    probs = probs.cpu()
    probs = probs.numpy()
    return probs[0], class_names
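
A hypothetical call to this predict; the image and checkpoint paths are placeholders, not files from the original project:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
probs, class_names = predict('flowers/test/1/image_06743.jpg', 'checkpoint.pth',
                             device, 'cat_to_name.json', topk=5)
for p, name in zip(probs, class_names):
    print(f'{name}: {p:.3f}')
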
def main():
    args = arg_parser()

    # Load model from Checkpoint
    model, _ = helper.load_checkpoint(args.checkpoint)

    # If GPU is specified, move the model to CUDA
    if args.device:
        model.to('cuda')

    # Resolve the image path: treat a non-absolute path as relative to the current directory
    if not os.path.isabs(args.path_to_image):
        im_path = os.path.join(os.getcwd(), args.path_to_image)
    else:
        im_path = args.path_to_image

    with open(args.cat_to_name, 'r') as f:
        cat_to_name = json.load(f)

    # Run prediction on the passed image and print results
    lab_list, prob_list = predict(im_path, model, cat_to_name, 5)
    for i in range(len(prob_list)):
        print(
            f'Prediction {i+1}:     Species: {lab_list[i]}, Confidence in Prediction: {prob_list[i]}'
        )
def diverse_samples(args, params):
    """
    command:
    python3 main.py --local --run diverse --image_name strasbourg_000001_061472 --temp 0.9 --model improved_so_large --last_optim_step 276000 \
                    --img_size 512 1024 --dataset cityscapes --direction label2photo \
                    --n_block 4 --n_flow 10 10 10 10 --do_lu --reg_factor 0.0001 --grad_checkpoint
    """
    optim_step = args.last_optim_step
    temperature = args.temp
    n_samples = 10
    img_size = [512, 1024]
    n_blocks = args.n_block
    image_name = args.image_name

    image_cond_path = f'/local_storage/datasets/moein/cityscapes/gtFine_trainvaltest/gtFine/train/{image_name.split("_")[0]}/{image_name}_gtFine_color.png'
    image_gt_path = f'/local_storage/datasets/moein/cityscapes/leftImg8bit_trainvaltest/leftImg8bit/train/{image_name.split("_")[0]}/{image_name}_leftImg8bit.png'

    model_paths = helper.compute_paths(args, params)
    output_folder = os.path.join(model_paths['diverse_path'], image_name, f'temp={temperature}')
    print(f'out folder: {output_folder}')

    helper.make_dir_if_not_exists(output_folder)

    shutil.copyfile(image_cond_path, os.path.join(output_folder, 'segment.png'))
    shutil.copyfile(image_gt_path, os.path.join(output_folder, 'real.png'))
    print('Copy segment and real images: done')

    model = models.init_model(args, params)
    model = helper.load_checkpoint(model_paths['checkpoints_path'], optim_step, model, optimizer=None, resume_train=False)[0]

    step = 2
    for i in range(0, n_samples, step):
        paths_list = [os.path.join(output_folder, f'sample_{(i + 1) + j}.png') for j in range(step)]
        experiments.take_multiple_samples(model, n_blocks, temperature, step, img_size, image_cond_path, paths_list)
Example #10
def transfer_content(args, params, content_file, condition_file, new_cond_file,
                     file_path):
    trans = helper.get_transform()
    content_image_batch = helper.remove_alpha_channel(
        trans(Image.open(content_file))).unsqueeze(0)
    cond_image_batch = helper.remove_alpha_channel(
        trans(Image.open(condition_file))).unsqueeze(0)
    new_cond_image_batch = helper.remove_alpha_channel(
        trans(Image.open(new_cond_file))).unsqueeze(0)

    model = models.init_model(args, params)
    model = helper.load_checkpoint(path_to_load='../checkpoints',
                                   optim_step=136000,
                                   model=model,
                                   optimizer=None,
                                   resume_train=False)[0]

    z_outs = model(x_a=cond_image_batch.clone(),
                   x_b=content_image_batch.clone())[1]['z_outs']
    new_image = model.reverse(x_a=new_cond_image_batch.clone(),
                              z_b_samples=z_outs,
                              reconstruct=True)

    utils.save_image(new_image[0].clone(), file_path, nrow=1, padding=0)
    print('saved the result at:', file_path)
def main():
    args = parse_args()
    model = load_checkpoint("./checkpoints/" + args.checkpoint)
    labels = label_mapper()

    probs, classes = predict(args.file_path, model, args.gpu, int(args.top_k))

    flowers = [labels[str(idx)] for idx in classes]

    for idx, flower in enumerate(flowers):
        print(f"{flower} with prediction of {probs[idx] * 100:.2f}%")
Example #12
def run_training(args, params):
    # print run info
    helper.print_info(args, params, model=None, which_info='params')

    # setting comet tracker
    tracker = None
    if args.use_comet:
        tracker = init_comet(args, params)
        print("In [run_training]: Comet experiment initialized...")

    if 'dual_glow' in args.model:
        models.train_dual_glow(args, params, tracker)
    else:
        model = models.init_model(args, params)
        optimizer = optim.Adam(model.parameters())
        reverse_cond = data_handler.retrieve_rev_cond(args,
                                                      params,
                                                      run_mode='train')
        train_configs = trainer.init_train_configs(args)

        # resume training
        if args.resume_train:
            optim_step = args.last_optim_step
            checkpoints_path = helper.compute_paths(args,
                                                    params)['checkpoints_path']
            model, optimizer, _, lr = load_checkpoint(checkpoints_path,
                                                      optim_step, model,
                                                      optimizer)

            if lr is None:  # if not saved in checkpoint
                lr = params['lr']
            trainer.train(args,
                          params,
                          train_configs,
                          model,
                          optimizer,
                          lr,
                          tracker,
                          resume=True,
                          last_optim_step=optim_step,
                          reverse_cond=reverse_cond)
        # train from scratch
        else:
            lr = params['lr']
            trainer.train(args,
                          params,
                          train_configs,
                          model,
                          optimizer,
                          lr,
                          tracker,
                          reverse_cond=reverse_cond)
Example #13
def sample_trained_c_flow(args, params):
    checkpt_pth = params['checkpoints_path']['real'][args.cond_mode][
        args.model]
    # could also use the init_and_load function
    model = models.init_model(args, params, run_mode='infer')
    model, _, _ = load_checkpoint(checkpt_pth, args.last_optim_step, model,
                                  None, False)  # loading the model

    if args.conditional:
        sample_c_flow_conditional(args, params, model)

    if args.syn_segs:
        syn_new_segmentations(args, params, model)
def main():
    model = helper.load_checkpoint(path)
    with open('cat_to_name.json', 'r') as json_file:
        cat_to_name = json.load(json_file)
    probabilities = helper.predict(path_image, model, topk, power,
                                   number_of_outputs)
    labels = [
        cat_to_name[str(index + 1)] for index in np.array(probabilities[1][0])
    ]
    probability = np.array(probabilities[0][0])
    i = 0
    while i < number_of_outputs:
        print("{} with a probability of {}".format(labels[i], probability[i]))
        i += 1
    print("Predicting Done!")
Example #15
def prepare_experiment(params, args, device, exp_name):
    checkpoint_pth = params['checkpoints_path'][
        'conditional']  # always conditional
    optim_step = args.last_optim_step
    save_path = params['samples_path']['conditional'] + f'/{exp_name}'
    make_dir_if_not_exists(save_path)

    # init model and load checkpoint
    model = init_glow(params)
    model, _, _ = load_checkpoint(checkpoint_pth,
                                  optim_step,
                                  model,
                                  None,
                                  resume_train=False)

    return model, save_path
def predict(model):
    if model is None:
        model = helper.load_checkpoint()

    device = helper.get_device()

    image_tensor, image = helper.get_image_tensor(
        '\nPlease enter the path of the image you want to analyse\n')
    image_tensor = image_tensor.to(device)
    topk = helper.get_int(
        '\nPlease enter how many of the top predictions you want to see (topk)\n'
    )

    model = model.to(device)

    print('\nPredicting\n')

    with torch.no_grad():
        output = model(image_tensor)

    ps = torch.exp(output)

    topK_ps = torch.topk(ps, topk)

    probs = topK_ps[0].cpu().numpy().squeeze()
    sorted_ps_label_keys = topK_ps[1].cpu().numpy().squeeze()
    classes = []

    print('Sorted label keys {}'.format(sorted_ps_label_keys))

    try:
        get_label = lambda x: idx_to_class[str(x)]
        for i in sorted_ps_label_keys[0:topk]:
            classes.append(get_label(i))

    except NameError:
        print(
            '\nCaught NameError: idx_to_class does not exist\nUsing raw index keys\n'
        )
        for i in sorted_ps_label_keys[0:topk]:
            classes.append(i)

    print('\nFinished predicting\n')

    helper.view_classify(image, probs, classes)
    return
Example #17
def new_condition(img_list, params, args, device):
    checkpoint_pth = params['checkpoints_path'][
        'conditional']  # always conditional
    optim_step = args.last_optim_step
    save_path = params['samples_path']['conditional'] + f'/new_condition'
    make_dir_if_not_exists(save_path)

    # init model and load checkpoint
    model = init_glow(params)
    model, _, _ = load_checkpoint(checkpoint_pth,
                                  optim_step,
                                  model,
                                  None,
                                  resume_train=False)

    for img_num in img_list:
        all_sampled = []
        img, label = get_image(img_num,
                               params['data_folder'],
                               args.img_size,
                               ret_type='batch')

        # get the latent vectors of the image
        forward_cond = (args.dataset, label)
        _, _, z_list = model(
            img, forward_cond
        )  # get the latent vectors corresponding to the style of the chosen image

        for digit in range(10):
            new_cond = ('mnist', digit, 1)
            # pass the new cond along with the extracted latent vectors
            # apply it to a new random image with another condition (another digit)
            sampled_img = model.reverse(z_list,
                                        reconstruct=True,
                                        coupling_conds=new_cond)
            all_sampled.append(
                sampled_img.squeeze(dim=0)
            )  # removing the batch dimension (=1) for the sampled image
            print(f'In [new_condition]: sample with digit={digit} done.')

        utils.save_image(all_sampled,
                         f'{save_path}/img={img_num}.png',
                         nrow=10)
        print(f'In [new_condition]: done for img_num {img_num}')
Example #18
def main():
    args = parse_args()
    gpu = args.gpu
    model = load_checkpoint(args.checkpoint)
    cat_to_name = load_cat_names(args.category_names)

    img_path = args.filepath
    probs, classes = predict(img_path, model, int(args.top_k), gpu)
    labels = [cat_to_name[str(index)] for index in classes]
    probability = probs
    print('File selected: ' + img_path)

    print(labels)
    print(probability)

    i = 0  # this prints out top k classes and probs
    while i < len(labels):
        print("{} with a probability of {}".format(labels[i], probability[i]))
        i += 1  # cycle through
def main():

    # Get input arguments
    args = arg_parser()

    # Check if GPU is available
    device = helper.check_gpu(args.gpu)
    print('Using {} for computation.'.format(device))

    # Load the model from checkpoint
    model = helper.load_checkpoint(args.checkpoint_path)
    print("Model has been loaded from the checkpoint.")
    print("You will get predictions in a bit...")

    # Get predictions for the chosen image
    top_probs, top_labels, top_flowers = helper.predict(
        model, args.img_path, args.top_k)

    # Print top n probabilities
    helper.print_probability(top_flowers, top_probs)
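
A minimal sketch of what the print_probability helper called above might look like; the argument order follows the call site, the formatting is an assumption:

def print_probability(flowers, probs):
    # pair each flower name with its probability, ranked from most to least likely
    for rank, (flower, prob) in enumerate(zip(flowers, probs), start=1):
        print('Rank {}: flower = {}, likelihood = {:.2f}%'.format(rank, flower, prob * 100))
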
def main():

    input_args = get_input_args()
    gpu = torch.cuda.is_available() and input_args.gpu
    print("Predicting on {} using {}".format("GPU" if gpu else "CPU",
                                             input_args.checkpoint))

    model = helper.load_checkpoint(input_args.checkpoint)

    if gpu:
        model.cuda()

    use_mapping_file = False

    if input_args.cat_names:
        with open(input_args.cat_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True

    probs, classes = helper.predict(input_args.input, model, gpu,
                                    input_args.top_k)

    for i in range(input_args.top_k):
        print("probability of class {}: {}".format(classes[i], probs[i]))
Example #21
def predict(model):

    if model is None:
        model = helper.load_checkpoint()

    image_path = helper.get_file_path(
        '\nPlease enter the path of the image you want to analyse\n')
    topk = helper.get_int(
        '\nPlease enter how many of the top predictions you want to see (topk)\n'
    )

    device = helper.get_device()
    model = helper.load_device(model)

    image_tensor = helper.process_image(image_path).to(device)
    idx_to_class = helper.get_idx_to_class()
    print('\nPredicting\n')

    with torch.no_grad():
        output = model(image_tensor)

    ps = torch.exp(output)

    topK_ps = torch.topk(ps, topk)

    probs = topK_ps[0].cpu().numpy().squeeze()
    sorted_ps_label_keys = topK_ps[1].cpu().numpy().squeeze()
    get_label = lambda x: idx_to_class[str(x)]

    classes = []

    for i in sorted_ps_label_keys[0:topk]:
        classes.append(get_label(i))

    print('\nFinished predicting\n')
    return probs, classes
Example #22
ap = argparse.ArgumentParser()
ap.add_argument('input_img', nargs='*', action="store", type=str)
ap.add_argument(
    'checkpoint',
    default='/home/workspace/aipnd-project/checkpointlearnrate1.pth',
    nargs='*',
    action="store",
    type=str)
ap.add_argument('--top_k', default=5, dest="top_k", action="store", type=int)
ap.add_argument('--category_names',
                dest="category_names",
                action="store",
                default='cat_to_name.json')
ap.add_argument('--gpu', default="gpu", action="store", dest="gpu")

pa = ap.parse_args()
img_path = pa.input_img
num_outputs = pa.top_k
power = pa.gpu
input_img = pa.input_img
path = pa.checkpoint

trainloader, vldtnloader, testloader = helper.load_data()
model = helper.load_checkpoint(path)  # keep the returned model; it is used by predict below
with open('cat_to_name.json', 'r') as json_file:
    cat_to_name = json.load(json_file)
p = helper.predict(img_path, model, num_outputs, power)
labels = [cat_to_name[str(index + 1)] for index in np.array(p[1][0])]
probability = np.array(p[0][0])
i = 0
while i < num_outputs:
    print("{} has a probability of {}".format(labels[i], probability[i]))
    i += 1
print("finished")
def train_model(cust_model,
                dataloaders,
                criterion,
                optimizer,
                num_epochs,
                scheduler=None):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    start_time = time.time()
    val_acc_history = []
    best_acc = 0.0
    best_acc_inter = 0.0            # initialized so save_checkpoint below never sees an undefined name
    best_epoch_loss = float('inf')
    best_model_wts = copy.deepcopy(cust_model)
    best_optimizer_wts = optim.Adam(best_model_wts.parameters(), lr=0.0001)
    best_optimizer_wts.load_state_dict(optimizer.state_dict())
    start_epoch = args["lastepoch"] + 1
    if (start_epoch > 1):
        filepath = "./checkpoint_epoch" + str(args["lastepoch"]) + ".pth"
        #filepath="ResNet34watershedplus_linknet_50.pt"
        cust_model, optimizer = load_checkpoint(cust_model, filepath)
        #cust_model = load_model(cust_model,filepath)
    for epoch in range(start_epoch - 1, num_epochs, 1):
        print("Epoch {}/{}".format(epoch + 1, num_epochs))
        print("_" * 15)
        for phase in ["train", "valid"]:
            if phase == "train":
                cust_model.train()
            if phase == "valid":
                cust_model.eval()
            running_loss = 0.0
            jaccard_acc = 0.0
            jaccard_acc_inter = 0.0
            jaccard_acc_contour = 0.0
            dice_loss = 0.0

            for input_img, labels, inter, contours in tqdm(
                    dataloaders[phase], total=len(dataloaders[phase])):
                #input_img = input_img.cuda() if use_cuda else input_img
                #labels = labels.cuda() if use_cuda else labels
                #inter = inter.cuda() if use_cuda else inter
                input_img = input_img.to(device)
                labels = labels.to(device)
                inter = inter.to(device)
                contours = contours.to(device)
                label_true = torch.cat([labels, inter, contours], 1)
                #label_true=labels
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == "train"):
                    out = cust_model(input_img)
                    #preds = torch.sigmoid(out)
                    preds = out
                    #print(preds.shape)
                    loss = criterion(preds, label_true)
                    loss = loss.mean()

                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * input_img.size(0)
                #print(labels.shape)
                #preds=torch.FloatTensor(preds)
                #print(preds)
                preds = torch.cat(preds)  #for multiGPU
                #print(preds.shape)

                jaccard_acc += jaccard(
                    labels.to('cpu'), torch.sigmoid(preds.to('cpu'))
                )  # THIS IS THE ONE THAT STILL IS ACCUMULATION IN ONLY ONE GPU
                jaccard_acc_inter += jaccard(inter.to('cpu'),
                                             torch.sigmoid(preds.to('cpu')))
                jaccard_acc_contour += jaccard(contours.to('cpu'),
                                               torch.sigmoid(preds.to('cpu')))

                #dice_acc += dice(labels, preds)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)  # running_loss is sample-weighted
            print("| {} Loss: {:.4f} |".format(phase, epoch_loss))
            aver_jaccard = jaccard_acc / len(dataloaders[phase])
            aver_jaccard_inter = jaccard_acc_inter / len(dataloaders[phase])
            aver_jaccard_contour = jaccard_acc_contour / len(
                dataloaders[phase])
            #aver_dice = dice_acc / len(dataloaders[phase])
            #print("| {} Loss: {:.4f} | Jaccard Average Acc: {:.4f} | ".format(phase, epoch_loss, aver_jaccard))
            print(
                "| {} Loss: {:.4f} | Jaccard Average Acc: {:.4f} | Jaccard Average Acc inter: {:.4f}  | Jaccard Average Acc contour: {:.4f}| "
                .format(phase, epoch_loss, aver_jaccard, aver_jaccard_inter,
                        aver_jaccard_contour))
            print("_" * 15)
            if phase == "valid" and aver_jaccard > best_acc:
                best_acc = aver_jaccard
                best_acc_inter = aver_jaccard_inter  ## aver_jaccard_inter
                best_epoch_loss = epoch_loss
                #best_model_wts = copy.deepcopy(cust_model.state_dict)
                best_model_wts = copy.deepcopy(cust_model)
                best_optimizer_wts = optim.Adam(best_model_wts.parameters(),
                                                lr=0.0001)
                best_optimizer_wts.load_state_dict(optimizer.state_dict())
            if phase == "valid":
                val_acc_history.append(aver_jaccard)
        print("^" * 15)
        save_checkpoint(best_model_wts, best_optimizer_wts, epoch + 1,
                        best_epoch_loss, best_acc, best_acc_inter)
        print(" ")
        if scheduler is not None:  # scheduler defaults to None
            scheduler.step()
    time_elapsed = time.time() - start_time
    print("Training Complete in {:.0f}m {:.0f}s".format(
        time_elapsed // 60, time_elapsed % 60))
    #print("Best Validation Accuracy: {:.4f}".format(best_acc))
    #este no#best_model_wts = copy.deepcopy(cust_model.state_dict())
    cust_model.load_state_dict(best_model_wts.state_dict())
    return cust_model, val_acc_history
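
The resume branch above expects load_checkpoint(cust_model, filepath) to return the model together with its optimizer. A minimal sketch under that contract; the key names are assumptions:

import torch
from torch import optim

def load_checkpoint(cust_model, filepath):
    checkpoint = torch.load(filepath, map_location='cpu')
    cust_model.load_state_dict(checkpoint['model_state_dict'])     # key name assumed
    optimizer = optim.Adam(cust_model.parameters(), lr=0.0001)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])  # key name assumed
    return cust_model, optimizer
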
Example #24
def sanity_check(image_path):

    probs, labels = predict(image_path, model, args.top_k)

    ps = [x for x in probs.cpu().detach().numpy()[0]]
    npar = [x for x in labels.cpu().numpy()[0]]
    names = list()

    inv_mapping = {v: k for k, v in model.class_to_idx.items()}

    for i in npar:
        names.append(cat_to_name[str(inv_mapping[i])])

    h.imshow(h.process_image(image_path), ax=plt.subplot(2, 1, 1))
    plt.title(names[0])

    plt.subplot(2, 1, 2)
    sb.barplot(y=names, x=ps, color=sb.color_palette()[0])
    plt.show()


# predicting flower name
cat_to_name = h.label_mapping('cat_to_name.json')

# loading model from checkpoint
model, optimizer = h.load_checkpoint(args.checkpoint)
device = "cuda:0" if (args.gpu and torch.cuda.is_available()) else "cpu"

# predicting image
sanity_check(args.input)
Example #25
print(model)
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       args.lr)
best_loss = sys.maxsize

param_dict = helper.count_parameters(model)
print('number of trainable parameters = ',
      numpy.sum(list(param_dict.values())))

if args.cuda:
    model = model.cuda()

if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = helper.load_checkpoint(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_loss = checkpoint['best_loss']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

# ###############################################################################
# # Train the model
# ###############################################################################

train = train.Train(model, optimizer, dictionary, args, best_loss)
train.train_epochs(train_corpus, dev_corpus, args.start_epoch, args.epochs)
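
The resume block reads back the keys 'epoch', 'best_loss', 'state_dict', and 'optimizer', which implies a save-side convention roughly like this sketch (the helper name and call shape are assumptions):

import torch

def save_checkpoint(model, optimizer, epoch, best_loss, filename):
    # mirror the keys the resume block above reads back
    torch.save({'epoch': epoch,
                'best_loss': best_loss,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()}, filename)
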
Example #26
                beta1=0.5, beta2=0.999, epsilon=1e-08).minimize(train_outs_dict['transporter_cost'], var_list=transport_vars, global_step=global_step)
            
    helper.variable_summaries(train_outs_dict['generator_cost'], '/generator_cost')
    helper.variable_summaries(train_outs_dict['discriminator_cost'], '/discriminator_cost')
    helper.variable_summaries(train_outs_dict['transporter_cost'], '/transporter_cost')
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    sess = tf.InteractiveSession()
    merged_summaries = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(global_args.exp_dir+'/summaries', sess.graph)
    sess.run(init)

    if global_args.restore:
        print("=> Loading checkpoint: '{}'".format(global_args.global_exp_dir+global_args.restore_dir))
        try:
            helper.load_checkpoint(saver, sess, global_args.global_exp_dir + global_args.restore_dir)
            print("=> Loaded checkpoint: '{}'".format(global_args.global_exp_dir + global_args.restore_dir))
        except Exception:
            print("=> FAILED to load checkpoint: '{}'".format(global_args.global_exp_dir + global_args.restore_dir))

    def train(epoch):
        # both branches of the original epoch check set the same rates, so one assignment per dataset type suffices
        if data_loader.__module__ in ('datasetLoaders.RandomManifoldDataLoader', 'datasetLoaders.ToyDataLoader'):
            critic_rate, generator_rate = 1, 4
        else:
            critic_rate, generator_rate = 2, 2

        trans_steps, disc_steps, gen_steps = 0, 0, 0
        turn = 'gen'
        in_between_vis = 3
        report_count = 0
Example #27
print('------- Selected Options -------')
print('img_path        = {}'.format(image_path))
print('checkpoint_path = {}'.format(checkpoint_path))
print('top_k           = {}'.format(top_k))
print('category_names  = {}'.format(category_names))
print('gpu             = {}'.format(gpu))
print('--------------------------------')

# Load Categories
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)

# Load checkpoint
device = 'cuda' if gpu else 'cpu'
model, criterion, optimizer, class_labels = helper.load_checkpoint(
    device, checkpoint_path)

# Process image
image = helper.process_image(image_path)
image_tensor = torch.from_numpy(image).type(torch.FloatTensor)
image_tensor = image_tensor.unsqueeze(0)  # add a batch dimension instead of the unchecked in-place resize_

model.eval()
model.to(device)
image_tensor = image_tensor.to(device)

with torch.no_grad():  # no gradients needed for inference, so detach_() is unnecessary
    result = model(image_tensor)
result = torch.exp(result)

probs, idx = result.topk(top_k)
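
A hypothetical continuation that turns the top-k result into readable output; it assumes class_labels is an index-ordered list and cat_to_name is keyed by those labels (neither structure is shown in the original snippet):

for p, i in zip(probs[0].tolist(), idx[0].tolist()):
    print('{:6.3f}  {}'.format(p, cat_to_name[class_labels[i]]))
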
Example #28
def interpolate(cond_config,
                interp_config,
                params,
                args,
                device,
                mode='conditional'):
    # image and label are of type 'batch'
    img_index1, img_index2, rev_cond = cond_config['img_index1'], cond_config[
        'img_index2'], cond_config['reverse_cond']
    img1, label1 = get_image(img_index1,
                             params['data_folder'],
                             args.img_size,
                             ret_type='batch')
    img2, label2 = get_image(img_index2,
                             params['data_folder'],
                             args.img_size,
                             ret_type='batch')

    checkpoint_pth = params['checkpoints_path'][mode]
    optim_step = args.last_optim_step
    save_path = params['samples_path'][mode] + f'/interp'
    make_dir_if_not_exists(save_path)

    # init model and load checkpoint
    model = init_glow(params)
    model, _, _ = load_checkpoint(checkpoint_pth,
                                  optim_step,
                                  model,
                                  None,
                                  resume_train=False)

    # assumption: the two images are of the same condition (label), so I am only using label1
    forward_cond = (args.dataset, label1)

    _, _, z_list1 = model(img1, forward_cond)
    _, _, z_list2 = model(img2, forward_cond)

    z_diff = [z_list2[i] - z_list1[i] for i in range(len(z_list1))]

    coeff = 0
    steps = interp_config['steps']
    all_sampled = []

    for step in range(steps + 1):
        if interp_config['type'] == 'limited':
            coeff = step / steps  # this is the increment factor: e.g. 1/5, 2/5, ..., 5/5
        else:
            coeff = step * interp_config['increment']

        if interp_config['axis'] == 'all':  # full interpolation in all axes
            z_list_inter = [
                z_list1[i] + coeff * z_diff[i] for i in range(len(z_diff))
            ]

        else:  # interpolate along only the chosen axis, keeping the others untouched
            axis = 0 if interp_config['axis'] == 'z1' else 1 if interp_config['axis'] == 'z2' else 2
            z_list_inter = [z_list1[i] for i in range(len(z_list1))]  # deepcopy is not available for these tensors
            z_list_inter[axis] = z_list1[axis] + coeff * z_diff[axis]

        sampled_img = model.reverse(z_list_inter,
                                    reconstruct=True,
                                    coupling_conds=rev_cond).cpu().data
        all_sampled.append(sampled_img.squeeze(dim=0))
        # make naming consistent and easy to sort
        coeff_name = '%.2f' % coeff if interp_config[
            'type'] == 'limited' else round(coeff, 2)
        print(f'In [interpolate]: done for coeff {coeff_name}')

    utils.save_image(
        all_sampled,
        f'{save_path}/{img_index1}-to-{img_index2}_[{interp_config["axis"]}].png',
        nrow=10)
Example #29
                    dest="category_names",
                    action="store",
                    default='cat_to_name.json')
parser.add_argument('--gpu', default="gpu", action="store", dest="gpu")

pa = parser.parse_args()
image_path = pa.input_img
topk = pa.top_k
gpu_cpu = pa.gpu
input_img = pa.input_img
filepath = pa.checkpoint

training_loader, testing_loader, validation_loader = hp.load_data()

# load previously saved checkpoint
model = hp.load_checkpoint(filepath)  # keep the returned model; it is passed to hp.predict below

# load label conversion
with open('cat_to_name.json', 'r') as json_file:
    cat_to_name = json.load(json_file)

probabilities = hp.predict(image_path, model, topk, gpu_cpu)

top5_values = np.array(probabilities[0][0])
top5_value_categories = [
    cat_to_name[str(i)] for i in np.array(probabilities[1][0])
]

i = 0
while i < topk:
    print("{} is the category with a {} probability".format(
        top5_value_categories[i], top5_values[i]))
    i += 1

import os
import json
import argparse

from helper import process_image, build_model, load_checkpoint
from TrainTestPredictFunc import predict

cwd = os.getcwd()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Image Classification application')
    parser.add_argument(dest="image_path", default="flowers/test/28/image_05230.jpg", action="store",
                        help="path to an image, e.g. 'flowers/test/28/image_05230.jpg'")
    parser.add_argument(dest="checkpoint", default="ImageClassifier/final_checkpoint.pth", action="store",
                        help="the checkpoint saved from training, default: ImageClassifier/final_checkpoint.pth")
    parser.add_argument("--top_k", dest="top_k", default=5, type=int, action="store",
                        help="how many of the most likely classes to display, default: 5")
    parser.add_argument("--device", dest="device", action="store", default='cpu',
                        help="device for prediction, default: cpu")
    parser.add_argument("-c", "--category_names", dest="category_names", default="cat_to_name.json", action="store",
                        help="file mapping categories to real names, default: 'cat_to_name.json' in the working directory")
                        
    args = parser.parse_args()
    loaded_model = load_checkpoint(args.checkpoint)
    top_probs, top_class_label = predict(args.image_path, loaded_model, args.top_k, args.device)
    max_probs = max(top_probs)
    
    im_tensor = process_image(args.image_path)
    with open(args.category_names, 'r') as f:
         cat_to_name = json.load(f)
    img_label = cat_to_name[args.image_path.split('/')[-2]]
    print('***Prediction Results***')
    print('*top_probs:', top_probs)
    print('*top_class_label:', top_class_label)
    top_flowers = [cat_to_name[label] for label in top_class_label]
    print('*top_flowers:', top_flowers)
    print("*max class probability:", top_probs[0],
          "*predicted class label:", top_class_label[0],
          "*predicted flower:", top_flowers[0])