def predict(args):
    # Keep only the grayscale image that is stored in the blue channel.
    img = Image.open(args.image_path)
    if img.mode == 'RGB':
        print("Only taking blue channel from RGB image")
        img = img.split()[2]
    elif img.mode == 'L':
        print("Provided image is already grayscale")

    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485], std=[0.229]),
    ])
    img_t = transform(img)
    batch_t = torch.unsqueeze(img_t, 0)

    model = Model()
    checkpoint = torch.load(args.model_weights)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()

    # Inference only: no autograd graph is needed.
    with torch.no_grad():
        out = model(batch_t)
    _, preds = torch.max(out, 1)
    print('Result: ', args.all_labels[preds.item()])
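# Hedged sketch of a possible command-line entry point for predict() above,
# assuming argparse is imported alongside the other dependencies. The flag
# names simply mirror the attributes the function reads (image_path,
# model_weights, all_labels); the default label list is a made-up placeholder,
# not something taken from the original project.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image-path', type=str, required=True,
                        help="Path to the input image")
    parser.add_argument('--model-weights', type=str, required=True,
                        help="Path to the saved checkpoint")
    parser.add_argument('--all-labels', nargs='+', default=['label_0', 'label_1'],
                        help="Class names in the order the model outputs them")
    predict(parser.parse_args())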
def get_gesture_model(weights_path):
    model = Model(42, 32, 5)
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(weights_path))
        model = model.cuda()
    else:
        # Remap CUDA-saved tensors onto the CPU when no GPU is available.
        model.load_state_dict(
            torch.load(weights_path, map_location=lambda storage, loc: storage))
    model.eval()
    return model
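# Hypothetical usage of get_gesture_model(). The weights filename and the idea
# that the network takes 42-dimensional feature vectors are assumptions inferred
# only from the Model(42, 32, 5) constructor call above, not confirmed by the source.
if __name__ == '__main__':
    gesture_model = get_gesture_model('gesture_weights.pth')
    features = torch.randn(1, 42)        # one dummy 42-dim input vector
    if torch.cuda.is_available():
        features = features.cuda()       # keep the input on the same device as the model
    with torch.no_grad():
        scores = gesture_model(features)
    print('Predicted class index:', scores.argmax(dim=1).item())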
def main(): print("Abhishek Kumar") print("15648") print("CSA") parser = argparse.ArgumentParser() parser.add_argument("--test-data", default='test_input.txt', type=str, help="Name of the test file") args = parser.parse_args() model_path='model/model.bin' model = Model(h=4) device = "cpu" model.to(device) eval_dataset,w,data = read_test_data(args.test_data) model.load_state_dict(torch.load(model_path)) evaluate(model, eval_dataset,device,w,data)
def main():
    print(check_output(["nodejs", "--version"]))

    torch.backends.cudnn.benchmark = True

    # Increasing `repeat` will generate more cached files.
    transform = CacheNPY(prefix="b64_", repeat=1,
                         transform=torchvision.transforms.Compose([
                             ToMesh(random_rotations=True, random_translation=0.1),
                             ProjectOnSphere(bandwidth=64)
                         ]))
    transform = KeepName(transform)

    resdir = "test_perturbed"
    dataset, perturbed = resdir.split("_")
    perturbed = (perturbed == "perturbed")

    test_set = Shrec17("data", dataset, perturbed=perturbed, download=True,
                       transform=transform)
    loader = torch.utils.data.DataLoader(test_set, batch_size=16, shuffle=False,
                                         num_workers=4, pin_memory=True,
                                         drop_last=False)

    model = Model(55)
    model.cuda()
    model.load_state_dict(torch.load("state.pkl"))
    model.eval()

    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)

    predictions = []
    ids = []

    for batch_idx, data in enumerate(loader):
        if dataset != "test":
            data = data[0]
        file_names, data = data
        data = data.cuda()

        # Inference only: no autograd graph is needed (replaces the deprecated
        # volatile=True Variable).
        with torch.no_grad():
            predictions.append(model(data).cpu().numpy())
        ids.extend([x.split("/")[-1].split(".")[0] for x in file_names])

        print("[{}/{}] ".format(batch_idx, len(loader)))

    predictions = np.concatenate(predictions)

    # Numerically stable softmax over the raw network outputs.
    ex = np.exp(predictions - np.max(predictions, axis=1, keepdims=True))
    softmax = ex / np.sum(ex, axis=1, keepdims=True)
    predictions_class = np.argmax(predictions, axis=1)

    for i in range(len(ids)):
        print("{}/{} ".format(i, len(ids)), end="\r")
        idfile = os.path.join(resdir, ids[i])

        # Retrieve every shape assigned the same class, ranked by confidence.
        retrieved = [(softmax[j, predictions_class[j]], ids[j])
                     for j in range(len(ids))
                     if predictions_class[j] == predictions_class[i]]
        retrieved = sorted(retrieved, reverse=True)
        threshold = 0
        retrieved = [shape_id for prob, shape_id in retrieved if prob > threshold]

        with open(idfile, "w") as f:
            f.write("\n".join(retrieved))

    # Download and run the official SHREC'17 evaluator.
    url = "https://shapenet.cs.stanford.edu/shrec17/code/evaluator.zip"
    file_path = "evaluator.zip"

    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
                f.flush()

    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall(".")
    zip_ref.close()

    print(check_output(["nodejs", "evaluate.js", "../"], cwd="evaluator"))
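# Small standalone check of the numerically stable softmax used above: shifting
# each row by its maximum before exponentiating leaves the result unchanged but
# avoids overflow for large scores. The input values are made up.
logits = np.array([[1000.0, 1001.0, 999.0]])
shifted = np.exp(logits - np.max(logits, axis=1, keepdims=True))
stable_softmax = shifted / np.sum(shifted, axis=1, keepdims=True)
print(stable_softmax)  # ~[[0.245, 0.665, 0.090]], no overflow despite scores near 1000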
model = Model(momentum=params["MOMENTUM"],
              nesterov=params["NESTEROV"],
              learn_rate=params["LEARN_RATE"],
              learn_rate_decay=params["LR_DECAY"],
              sig_class_weight=params["SIG_WT"],
              bkg_class_weight=params["BKG_WT"],
              threshold=params["THRESHOLD"],
              optimizer=params["OPT"],
              loss_fn=loss_fn,
              output_fn=output_fn,
              layers=params["LAYERS"],
              nodes=params["NODES"],
              dropout=params["DROPOUT"],
              activation=params["ACTIVATION"],
              input_size=len(params["FEATURES"]),
              id_dict=type2id,
              save_tb_logs=params["SAVE_TB_LOGS"],
              save_metrics=params["METRICS"],
              save_wt_metrics=params["WT_METRICS"])

dataset.prepare_data()
dataset.setup("test")
test_dataset = dataset.test_dataloader()
training_metrics = model.metrics

model.load_state_dict(torch.load(params["LOAD_DIR"])['state_dict'], strict=False)

final_logs(model, test_dataset, params["THRESHOLD"], output_fn, type2id, gpus,
           None, params["LOG_DIR"])
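# Hedged sketch of what the `params` dictionary read above might look like.
# Every value here is a made-up placeholder for illustration, not a setting
# from the original configuration.
params = {
    "MOMENTUM": 0.9,
    "NESTEROV": True,
    "LEARN_RATE": 1e-3,
    "LR_DECAY": 0.0,
    "SIG_WT": 1.0,
    "BKG_WT": 1.0,
    "THRESHOLD": 0.5,
    "OPT": "sgd",
    "LAYERS": 3,
    "NODES": 64,
    "DROPOUT": 0.1,
    "ACTIVATION": "relu",
    "FEATURES": ["feature_1", "feature_2", "feature_3"],
    "SAVE_TB_LOGS": False,
    "METRICS": [],
    "WT_METRICS": [],
    "LOAD_DIR": "checkpoints/last.ckpt",
    "LOG_DIR": "logs/",
}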
#!/usr/bin/python
import json
import os

import torch
from torch.autograd import Variable

from train import Model, encode

current_dir = os.path.dirname(__file__)
model_file = os.path.join(current_dir, 'SplitModel.pt')
data_file = os.path.join(current_dir, 'data.json')
output_file = os.path.join(current_dir, 'prediction.json')

model = Model()
model.load_state_dict(torch.load(model_file))
model.eval()  # ensure inference behaviour for layers such as dropout or batch norm

with open(data_file) as f:
    examples = json.load(f)

data = Variable(torch.stack([encode(p['points']) for p in examples]))
logits = model(data)

# Keep an index only if its score clears 0.5 and it refers to a real point.
prediction = []
for example, probs in zip(examples, logits):
    prediction.append([
        i for i, prob in enumerate(list(probs))
        if float(prob) >= 0.5 and i < len(example['points'])
    ])

with open(output_file, 'w') as f:
    json.dump(prediction, f)
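# Standalone illustration of the 0.5-threshold filtering used above, with
# made-up numbers: an output index is kept only when its score is at least
# 0.5 and it refers to a point that actually exists in the example.
example_points = [[0, 0], [1, 1], [2, 2]]  # hypothetical example with 3 points
probs = [0.9, 0.2, 0.7, 0.6]               # hypothetical per-index model outputs
kept = [i for i, p in enumerate(probs) if p >= 0.5 and i < len(example_points)]
print(kept)  # -> [0, 2]; index 3 is dropped because the example has only 3 points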