Example #1
def __init__(self, size=224):
    global net
    self.size = size
    # Freeze the globally defined network before restoring its weights.
    self.disable_grads(net)
    self.checkpoint_path = "/home/vipul/Affine/Vision/classification/train/checkpoint"
    self.checkpoint_name = "checkpoint.pth.tar"
    load_checkpoint(net, os.path.join(self.checkpoint_path, self.checkpoint_name))
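
Example #1 relies on two helpers defined elsewhere in the project. A minimal sketch of what they might look like, written here as free functions and assuming the checkpoint is a dict with a "state_dict" key (the key name and both bodies are assumptions, not the project's actual code):

import torch

def load_checkpoint(model, path):
    # Restore the model's weights in place from a serialized checkpoint.
    checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint["state_dict"])

def disable_grads(model):
    # Freeze every parameter so the network is inference-only.
    for param in model.parameters():
        param.requires_grad = False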
Example #2
def main():
    args = parse_args()
    loader = load_val(config.val_path, args, distributed=False)

    model = darknet().eval()
    #model = torchvision.models.resnet101( pretrained=True ).eval()

    checkpoint_path = os.path.join(config.checkpoint_path,
                                   config.checkpoint_name)
    load_checkpoint(model, checkpoint_path)
    validate(loader, model, args)
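
The validate function is not shown in Example #2. A plausible sketch of such a loop, assuming the loader yields (images, targets) batches and that top-1 accuracy is the metric of interest (both are assumptions):

import torch

def validate(loader, model, args):
    # Accumulate top-1 accuracy over the whole validation set.
    correct, total = 0, 0
    with torch.no_grad():
        for images, targets in loader:
            preds = model(images).argmax(dim=1)
            correct += (preds == targets).sum().item()
            total += targets.size(0)
    print(f"Top-1 accuracy: {correct / total:.4f}")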
Example #3
def main():

    model = load_checkpoint("model.pth")

    testdata_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # Collect image paths, skipping the macOS .DS_Store metadata file.
    images_list = []
    for i in os.listdir("new_images"):
        if i != ".DS_Store":
            images_list.append("new_images/" + i)
            print(i)

    # Shuffle only after the list is populated; shuffling it while still
    # empty would be a no-op.
    random.shuffle(images_list)

    index = 49
    for img in images_list:
        infer_boundingbox(img, model, testdata_transform, index)
        index += 1
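
Note that load_checkpoint takes a different shape here than in Examples #1 and #2: it receives only a path and returns the model. A sketch of that variant, assuming the whole model object was serialized with torch.save(model, path) (an assumption; the file may instead hold only a state dict):

import torch

def load_checkpoint(path):
    # Load a fully serialized model object and switch it to eval mode.
    model = torch.load(path, map_location="cpu")
    model.eval()
    return model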
Example #4
# prepare test loader for the test set
test_file = args.data_path + args.test_file
test_data = ArticlesDataset(csv_file=test_file, vocab=vocab, label2id=label2id, max_text_len=args.text_len)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)

scores_dict = {'f1': [], 'recall': [], 'precision': [], 'confidence': []}

for run_num in range(args.num_runs):
    model_run_name = model_name + "_run" + str(run_num + 1)
    print("-"*10, "Run", run_num+1, "-"*10)
    print("Model name:", model_run_name)
    print("Loading model from", save_path + model_run_name + ".pt")

    best_model = CNN(cnn_args=cnn_args, mlp_args=mlp_args).to(device)
    optimizer = torch.optim.Adam(best_model.parameters(), lr=0.005)
    load_checkpoint(save_path + model_run_name + ".pt", best_model, optimizer, device, log_file)

    results = evaluate(best_model, test_loader)
    scores_dict['f1'].append(results['f1'])
    scores_dict['recall'].append(results['recall'])
    scores_dict['precision'].append(results['precision'])

    # if args.save_confidence is True:
    #     scores_dict['confidence'].append(results['confidence'])
    #     scores_dict['labels'].append(results['labels'])
    #     scores_dict['content'].append(results['content'])
    #     sentence_encodings = results['sentence_encodings']


scores_filename = save_path + model_name + "_test_scores.json"
# Persist the aggregated scores (assumes `import json` at the top of the file).
with open(scores_filename, 'w') as scores_file:
    json.dump(scores_dict, scores_file)
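
Example #4's load_checkpoint also restores optimizer state, matching the five-argument call above. A hedged sketch; the checkpoint key names and the treatment of log_file as an open file handle are assumptions:

import torch

def load_checkpoint(path, model, optimizer, device, log_file):
    # Restore model and optimizer state, then move the model to the device.
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint["model_state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    model.to(device)
    log_file.write("Loaded checkpoint from " + path + "\n")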
Example #5
    precision = precision_score(y_true, y_pred, average='macro')
    log(f"macro Recall: {recall}")
    log(f"macro Precision: {precision}")

    results = {'f1': macro_f1, 'recall': recall, 'precision': precision}

    return results


if __name__ == "__main__":

    #val_XT, val_yT = read_dataset("datafiles/val_enc.csv")
    test_XT, test_yT = read_dataset("datafiles/test_enc.csv")

    EMBEDDING_DIM = test_XT.shape[1]
    OUTPUT_DIM = test_yT.shape[1]
    BATCH_SIZE = 128

    #model = FFNN(EMBEDDING_DIM, OUTPUT_DIM)
    model = FFNN_DEEP(EMBEDDING_DIM, OUTPUT_DIM)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    test_custom_loader = CustomLoader(test_XT, test_yT)
    # No need to shuffle held-out data during evaluation.
    test_loader = DataLoader(test_custom_loader,
                             batch_size=BATCH_SIZE,
                             shuffle=False)

    load_checkpoint(f"datafiles/{MODEL_NAME}.pt", model, optimizer, DEVICE,
                    open(LOG_FILE, "a"))
    results = evaluate(model, test_loader)
    #log(results)
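
CustomLoader in Example #5 wraps the pre-encoded feature and label matrices as a PyTorch dataset. A minimal sketch of such a wrapper, inferred from how it is used above (the body is an assumption):

import torch
from torch.utils.data import Dataset

class CustomLoader(Dataset):
    # Wrap pre-encoded feature/label arrays so DataLoader can batch them.
    def __init__(self, X, y):
        self.X = torch.as_tensor(X, dtype=torch.float32)
        self.y = torch.as_tensor(y, dtype=torch.float32)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]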