Example #1
import time

import numpy as np
import torchvision
from torch.utils.tensorboard import SummaryWriter
# SequenceDataset and utils are project-local modules; their import paths depend on the repo layout.


def main():
    dataset = SequenceDataset("/home/ai/Data/kitti_formatted")
    loader, _, _ = utils.data_loaders(dataset, 1.0, 0.0, 0.0, 4, 1)

    writer = SummaryWriter()
    # camera settings for TensorBoard's mesh plugin (a three.js PerspectiveCamera)
    config_3d = {"camera": {"cls": "PerspectiveCamera", "fov": 75, "near": 0.1, "far": 5000.0}}
    
    for i, batch in enumerate(loader):
        tgt, refs, K, Kinv = batch
        
        # one random point per pixel of the target frame, colored solid red
        s = tgt.shape[-1] * tgt.shape[-2]
        verts = np.random.random((1, s, 3))
        colors = np.repeat(np.array([[[255, 0, 0]]]), s, 1)
        
        grid = torchvision.utils.make_grid(tgt, nrow=2)

        loss = 1 / (0.01 * i + 1)  # synthetic decaying loss for the demo plot
        
        writer.add_image("tgt", img_tensor=grid, global_step=i)
        writer.add_mesh("cloud", vertices=verts, colors=colors, config_dict=config_3d, global_step=i)
        writer.add_scalar("loss", scalar_value=loss, global_step=i)
        
        time.sleep(3.0)  # throttle logging so the run is easy to follow live

    writer.close()
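
utils.data_loaders above is a project-local helper that is not shown. Judging by the call site, it takes a dataset, three split fractions, a batch size, and a worker count, and returns one DataLoader per split. A minimal sketch under those assumptions (every detail of the signature is inferred, not confirmed):

from torch.utils.data import DataLoader, random_split

def data_loaders(dataset, train_frac, val_frac, test_frac, batch_size, num_workers):
    # split the dataset by the requested fractions; any remainder goes to test
    n = len(dataset)
    n_train, n_val = int(n * train_frac), int(n * val_frac)
    splits = random_split(dataset, [n_train, n_val, n - n_train - n_val])
    # only the training split is shuffled
    return tuple(DataLoader(s, batch_size=batch_size, shuffle=(i == 0),
                            num_workers=num_workers)
                 for i, s in enumerate(splits))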
Example #2
# BNN, saved_BNNs, data_loaders, loss_gradients, and TESTS are project-local imports.
def main(args):

    posterior_samples_list = [1, 10, 100]

    ### load BNN and data

    dataset, model = saved_BNNs["model_" + str(args.model_idx)]
    batch_size = 5000 if model["inference"] == "hmc" else 128  # HMC posteriors are evaluated in larger batches

    _, test_loader, inp_shape, out_size = \
        data_loaders(dataset_name=dataset, batch_size=batch_size,
                     n_inputs=args.n_inputs, shuffle=False)

    bnn = BNN(dataset, *list(model.values()), inp_shape, out_size)
    bnn.load(device=args.device, rel_path=TESTS)
    filename = bnn.name

    ### compute loss gradients

    for posterior_samples in posterior_samples_list:
        loss_gradients(net=bnn,
                       n_samples=posterior_samples,
                       savedir=filename + "/",
                       data_loader=test_loader,
                       device=args.device,
                       filename=filename)
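
saved_BNNs, BNN, loss_gradients, and TESTS come from the surrounding project and are not shown. For readability, an illustrative (entirely assumed) shape of the registry: each entry pairs a dataset name with a dict of constructor arguments, which main unpacks positionally into BNN. Only the "inference" key is confirmed by the code above.

# assumed structure; every key and value except "inference" is a guess
saved_BNNs = {
    "model_0": ("mnist", {"hidden_size": 512, "activation": "leaky",
                          "architecture": "fc2", "inference": "svi",
                          "epochs": 5, "lr": 0.01}),
}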
Example #3
# utils is a project-local module; pa holds the parsed command-line arguments.
def main():
    train_data, validation_data, test_data = utils.data_transforms(pa.data_dir)
    trainloader, validationloader, testloader = utils.data_loaders(pa.data_dir)

    model, criterion, optimizer = utils.network_setup(
        pa.architecture, pa.dropout, pa.input_units, pa.hidden_units,
        pa.learning_rate, pa.device)

    utils.network_training(model, trainloader, validationloader, criterion,
                           optimizer, pa.epochs, pa.print_every, pa.device)

    utils.save_checkpoint(model, train_data, optimizer, pa.architecture,
                          pa.dropout, pa.input_units, pa.hidden_units,
                          pa.learning_rate, pa.epochs, pa.save_dir)

    print("Finished training!")
Example #4
import time

import numpy as np
import wandb
# SequenceDataset and utils are project-local modules; their import paths depend on the repo layout.


def main():
    dataset = SequenceDataset("/home/ai/Data/kitti_formatted")
    loader, _, _ = utils.data_loaders(dataset, 1.0, 0.0, 0.0, 4, 1)

    wandb.init(project="sfmnet")

    for i, batch in enumerate(loader):
        tgt, refs, K, Kinv = batch
        #wandb.log({ "batch": wandb.Video((tgt*255).byte(), fps=1, format="webm") }, step=i)
        # random point cloud: one point per pixel, xyz in [0, 100) plus an RGB color
        pos = np.random.random((tgt.shape[-1] * tgt.shape[-2], 3)) * 100.0
        color = np.random.randint(0, 256, (tgt.shape[-1] * tgt.shape[-2], 3))
        cloud = np.concatenate((pos, color), axis=1)
        print(cloud.shape)  # sanity check: expect (N, 6)
        wandb.log(
            {
                "batch": [wandb.Image(img) for img in tgt],
                "loss": 1 / (0.01 * i + 1),
                "cloud": wandb.Object3D(cloud)
            },
            step=i)
        time.sleep(3.0)
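
A note on the cloud logged above: wandb.Object3D accepts a numpy array with one point per row, of shape (N, 3) for xyz, (N, 4) for xyz plus a category, or (N, 6) for xyz plus RGB. The concatenation therefore produces an (N, 6) point cloud, which is what the print(cloud.shape) sanity check confirms.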
Example #5
import json

import torch
# utils is project-local; pa holds the parsed command-line arguments.


def main():
    train_data, validation_data, test_data = utils.data_transforms(pa.data_dir)
    trainloader, validationloader, testloader = utils.data_loaders(pa.data_dir)

    model = utils.load_checkpoint(pa.save_dir)

    with open(pa.category_names) as json_file:
        cat_to_name = json.load(json_file)

    probs, classes = utils.predict(pa.image_path, model, pa.topk, pa.device)

    # move results to CPU and turn the class indices into strings,
    # since cat_to_name is keyed by string labels
    probs = probs.type(torch.FloatTensor).to('cpu').numpy()
    classes = classes.type(torch.FloatTensor).to('cpu').numpy()
    classes = classes.astype(int).astype(str)

    class_names = [cat_to_name[i] for i in classes[0]]

    print(probs)
    print(classes)
    print(class_names)

    print("Finsihed predicting!")
Example #6
# Earlier argparse setup is truncated in this excerpt; the first visible
# call defines --hidden_units.
parser.add_argument('--hidden_units',
                    type=int,
                    dest="hidden_units",
                    action="store",
                    default=4096)
parser.add_argument('--gpu', dest="gpu", action="store", default="gpu")

parse = parser.parse_args()
data_dir = parse.data_dir
save_dir = parse.save_dir
arch = parse.arch
learn_rate = parse.learning_rate
epochs = parse.epochs
hidden_units = parse.hidden_units
gpu = parse.gpu

trainloader, validloader, testloader, train_data = utils.data_loaders(data_dir)
model, input_size = utils.build_network(arch)
print(learn_rate)  # debug: confirm the parsed learning rate

model, optimizer = utils.trainer(trainloader,
                                 validloader,
                                 model,
                                 epochs=epochs,
                                 steps=0,
                                 learnrate=learn_rate,
                                 print_every=5,
                                 gpu=gpu)

utils.save_checkpoint(model,
                      train_data,
                      optimizer,
Example #7
    # load model
    model = load_model(args.model)

    eps = args.eps
    if args.dataset in ('mnist', 'cifar'):
        mean = normalization[args.dataset][0]
        std = [normalization[args.dataset][1] for _ in mean]
        x = input_shape[args.dataset]
        x_op = tf.placeholder(tf.float32, shape=(None, x[0], x[1], x[2]))
    train_loader, test_loader = data_loaders(args.dataset, args.batch_size,
                                             shuffle_test=False, norm_mean=mean, norm_std=std)

    sess = tf.Session(config=config)  # session config is set up earlier, outside this excerpt

    # wrap the PyTorch model so the TF-based cleverhans attacks can drive it
    tf_model = convert_pytorch_model_to_tf(model)
    ch_model = CallableModelWrapper(tf_model, output_layer='logits')

    if args.attack == 'CW':
        attk = CarliniWagnerL2(ch_model, sess=sess)
        params = {'binary_search_steps': 10,
                  'max_iterations': 100,
                  'learning_rate': 0.2,
                  'batch_size': args.batch_size,
                  'initial_const': 10}
    elif args.attack == 'PGD':
        attk = ProjectedGradientDescent(ch_model, sess=sess)
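
The excerpt ends before the PGD parameters are set. For symmetry with the CW branch, a hedged sketch using generate-time arguments that cleverhans' ProjectedGradientDescent accepts; every value here is an assumption, not the original code:

        # assumed PGD parameters, mirroring the style of the CW branch above
        params = {'eps': eps,              # reuse args.eps from above (assumed)
                  'eps_iter': eps / 10.0,  # per-step size (assumed)
                  'nb_iter': 40,           # number of PGD steps (assumed)
                  'clip_min': 0.0,
                  'clip_max': 1.0}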