def __init__(self, env, args):
     # store the environment and the arguments
     self.env = env
     self.args = args
     # define the Q-network and its target network
     self.net = Net(self.env.action_space.n)
     self.target_net = Net(self.env.action_space.n)
     # make sure the target net has the same weights as the network
     self.target_net.load_state_dict(self.net.state_dict())
     if self.args.cuda:
         self.net.cuda()
         self.target_net.cuda()
     # define the optimizer
     self.optimizer = torch.optim.Adam(self.net.parameters(),
                                       lr=self.args.lr)
     # define the replay memory
     self.buffer = replay_memory(self.args.buffer_size)
     # define the linear schedule of the exploration
     self.exploration_schedule = linear_schedule(int(self.args.total_timesteps * self.args.exploration_fraction),
                                                 self.args.final_ratio, self.args.init_ratio)
     # create the folder to save the models
     if not os.path.exists(self.args.save_dir):
         os.mkdir(self.args.save_dir)
     # set the environment folder
     self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
     if not os.path.exists(self.model_path):
         os.mkdir(self.model_path)
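
The __init__ above depends on a linear_schedule helper that is not shown. A minimal sketch consistent with how it is constructed (the get_value method name and the annealing formula are assumptions, following the common baselines-style schedule):

class linear_schedule:
    def __init__(self, total_timesteps, final_ratio, init_ratio=1.0):
        self.total_timesteps = total_timesteps
        self.final_ratio = final_ratio
        self.init_ratio = init_ratio

    def get_value(self, t):
        # fraction of the schedule elapsed, clipped once annealing is done
        frac = min(float(t) / self.total_timesteps, 1.0)
        return self.init_ratio + frac * (self.final_ratio - self.init_ratio)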
Example #2
    def __init__(self,
                 n_s,
                 n_a,
                 hiddens=(128, 64),
                 epsilon=1.0,
                 epsilon_min=0.005,
                 epsilon_decay=0.05,
                 gamma=0.99,
                 batch_size=64,
                 memory_capacity=100000,
                 lr=0.001,
                 is_dueling=False,
                 is_prioritize=True,
                 replace_iter=100,
                 is_soft=False,
                 tau=0.01,
                 e=0.01,
                 a=0.6,
                 b=0.4,
                 use_gpu=False):
        self.n_s = n_s
        self.n_a = n_a
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.replace_iter = replace_iter
        self.lr = lr
        self.gamma = gamma
        self.batch_size = batch_size
        self.memory_capacity = memory_capacity
        self.is_soft = is_soft
        self.is_prioritize = is_prioritize
        self.tau = tau
        self.eval_net = Net(n_s, n_a, hiddens=hiddens, is_dueling=is_dueling)
        self.target_net = Net(n_s, n_a, hiddens=hiddens, is_dueling=is_dueling)
        if use_gpu:
            self.eval_net = self.eval_net.cuda()
            self.target_net = self.target_net.cuda()
        if is_prioritize:
            self.memory = Memory(memory_capacity, e, a, b)
        else:
            self.memory = np.zeros((memory_capacity, self.n_s * 2 + 2))
        self.memory_count = 0
        self.learn_count = 0

        self.loss_func = nn.MSELoss()
        self.optimizer = optim.Adam(self.eval_net.parameters(), lr=self.lr)
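
The non-prioritized branch above keeps transitions in a flat numpy array whose rows pack (s, a, r, s_), which is why each row has n_s * 2 + 2 entries. A minimal sketch of the matching store method, assuming 1-D states of length n_s (the name store_transition is an assumption):

    def store_transition(self, s, a, r, s_):
        # pack one transition into a row, overwriting the oldest entry
        transition = np.hstack((s, [a, r], s_))
        index = self.memory_count % self.memory_capacity
        self.memory[index, :] = transition
        self.memory_count += 1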
Example #3
 def __init__(self, envs, args):
     self.envs = envs
     self.args = args
     # define the network
     self.net = Net(self.envs.action_space.n)
     if self.args.cuda:
         self.net.cuda()
     # define the optimizer
     self.optimizer = torch.optim.RMSprop(self.net.parameters(),
                                          lr=self.args.lr,
                                          eps=self.args.eps,
                                          alpha=self.args.alpha)
     if not os.path.exists(self.args.save_dir):
         os.mkdir(self.args.save_dir)
     # set the save path for this environment
     self.model_path = self.args.save_dir + self.args.env_name + '/'
     if not os.path.exists(self.model_path):
         os.mkdir(self.model_path)
     # get the initial observations
     self.batch_ob_shape = (self.args.num_processes * self.args.nsteps,
                            ) + self.envs.observation_space.shape
     self.obs = np.zeros(
         (self.args.num_processes, ) + self.envs.observation_space.shape,
         dtype=self.envs.observation_space.dtype.name)
     self.obs[:] = self.envs.reset()
     self.dones = [False for _ in range(self.args.num_processes)]
Example #4
def main(args):
	# GET DATA
	transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
	dataset = datasets.ImageFolder(args.train_dir, transform=transform)

	# GET MODEL
	model = Net(n_class=10)

	# TRAINING
	print('Start Training')
	with mlflow.start_run():

		for k, v in vars(args).items():
			mlflow.log_param(k, v)

		# split once, outside the epoch loop, so train/val stay disjoint across epochs
		train_size = int(.8 * len(dataset))
		test_size = len(dataset) - train_size
		train_set, val_set = torch.utils.data.random_split(dataset, [train_size, test_size])
		trainloader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True)
		valloader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False)

		for epoch in range(args.epochs):  # loop over the dataset multiple times
			train(epoch, model, trainloader)
			test(epoch, model, valloader)


		print('Finished Training')
		mlflow.pytorch.log_model(model, artifact_path="pytorch-model", pickle_module=pickle)
		print("\nThe model is logged at:\n%s" % os.path.join(mlflow.get_artifact_uri(), "pytorch-model"))
Example #5
def training(share_exp, oracle):
    env_config = env_configs[args.env]
    env = Terrain(env_config.map_index)

    policy = []
    value = []
    oracle_network = {}
    for i in range(env.num_task):
        policy_i = StochasticPiNet(env_config.Policy)
        policy.append(policy_i)
        value_i = Net(env_config.Value)
        value.append(value_i)

    for i in range(env.num_task - 1):
        for j in range(i + 1, env.num_task):
            oracle_network[i, j] = StochasticPiNet(env_config.Z)

    multitask_agent = MultitaskPolicy(env=env,
                                      env_config=env_config,
                                      policy=policy,
                                      value=value,
                                      oracle_network=oracle_network,
                                      share_exp=share_exp,
                                      oracle=oracle)

    multitask_agent.train()
Example #6
 def _setup(self, config):
     print("Loading word vectors...")
     word2index, word_vecs = process_word_vecs(FAST_TEXT)
     # Note that the word embeddings are normalized.
     self.wv = WV(F.normalize(word_vecs), word2index)
     # wv = WV(word_vecs, word2index)
     print("Done.")
     self.corpus_size = config["corpus_size"]
     bigram_fn_name = "diff"
     out_bigram_dim = 300
     dist_fn_name = "cos_dist"
     loss_fn_name = "mrl"
     margin = config["margin"]
     self.lr = config["lr"]
     self.num_epochs = config["num_epochs"]
     self.batch_size = config["batch_size"]
     self.test_model = True
     self.test_freq = config["test_freq"]
     with open(PROCESSED / "train.{}.pkl".format(str(self.corpus_size)), "rb") as f:
         wiki_train = pickle.load(f)
     with open(PROCESSED / "valid.pkl", "rb") as f:
         wiki_valid = pickle.load(f)
     wiki_combined = wiki_train + wiki_valid
     self.corpus = Corpus("wiki", wiki_combined, self.wv, filter_stopwords=True)
     self.model = Net(
         self.wv.vecs.size(1), BigramEncoder(bigram_fn_name), out_bigram_dim
     )
     self.model.to(device)
     self.dist_fn = DistanceFunction(dist_fn_name)
     self.loss_fn = LossFunction(loss_fn_name, margin=margin)
     self.device = device
     self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
     torch.manual_seed(config["seed"])
     print("Traninig on Wikipedia corpus of size {}".format(self.corpus_size))
Example #7
def evaluate():
    if mx.context.num_gpus() > 0:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu(0)

    # loading configs
    args = Options().parse()
    cfg = Configs(args.config_path)
    # set logging level
    logging.basicConfig(level=logging.INFO)

    # images
    content_image = tensor_load_rgbimage(cfg.content_image,
                                         ctx,
                                         size=cfg.val_img_size,
                                         keep_asp=True)
    style_image = tensor_load_rgbimage(cfg.style_image,
                                       ctx,
                                       size=cfg.val_style_size)
    style_image = preprocess_batch(style_image)
    # model
    style_model = Net(ngf=cfg.ngf)
    style_model.collect_params().load(cfg.val_model, ctx=ctx)
    # forward
    output = style_model(content_image, style_image)
    # save img
    tensor_save_bgrimage(output[0], cfg.output_img)
    logging.info("Save img to {}".format(cfg.output_img))
Example #8
def debug_me():
    # Define network
    net = Net()
    print(net)

    data_transform = transforms.Compose(
        [Rescale(250), RandomCrop(224),
         Normalize(), ToTensor()])

    aww_dataset = FacialKeypointsDataset(
        csv_file='data/aww_frames_keypoints.csv',
        root_dir='data/aww/',
        transform=data_transform)

    sample = aww_dataset[0]
    print(sample['image'].shape, sample['keypoints'].shape)
    print(np.max(sample['keypoints']))

    aww_loader = DataLoader(aww_dataset,
                            batch_size=10,
                            shuffle=True,
                            num_workers=4)

    aww_images, aww_outputs, gt_pts = net_sample_output(net, aww_loader)

    visualize_output(aww_images, aww_outputs, gt_pts, 1)
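
debug_me relies on a net_sample_output helper that is not shown. A plausible sketch, assuming batches are dicts with 'image' and 'keypoints' keys (as above) and that the model predicts 68 (x, y) keypoint pairs:

def net_sample_output(net, loader):
    # run one batch through the network and reshape the raw predictions
    for sample in loader:
        images = sample['image'].type(torch.FloatTensor)
        key_pts = sample['keypoints']
        output_pts = net(images)
        output_pts = output_pts.view(output_pts.size(0), 68, -1)
        return images, output_pts, key_pts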
Example #9
def main(args):
    if args.adapt_setting == 'svhn2mnist':
        target_dataset = ImageClassdata(txt_file=args.tar_list, root_dir=args.tar_root, img_type=args.img_type,
                                        transform=transforms.Compose([
                                            transforms.Resize(28),
                                            transforms.ToTensor(),
                                        ]))
    elif args.adapt_setting == 'mnist2usps':
        target_dataset = ImageClassdata(txt_file=args.tar_list, root_dir=args.tar_root, img_type=args.img_type,
                                        transform=transforms.Compose([
                                            transforms.Resize(28),
                                            transforms.ToTensor(),
                                        ]))
    else:
        raise NotImplementedError
    dataloader = DataLoader(target_dataset, batch_size=args.batch_size, shuffle=False,
                            drop_last=False, num_workers=1, pin_memory=True)

    model = Net().to(device)
    model.load_state_dict(torch.load(args.MODEL_FILE))
    model.eval()

    total_accuracy = 0
    with torch.no_grad():
        for x, y_true in tqdm(dataloader, leave=False):
            x, y_true = x.to(device), y_true.to(device)
            y_pred = model(x)
            total_accuracy += (y_pred.max(1)[1] == y_true).float().mean().item()
    
    mean_accuracy = total_accuracy / len(dataloader)
    print(f'Accuracy on target data: {mean_accuracy:.4f}')
Example #10
def main(args):
    train_loader, val_loader = create_dataloaders(args.batch_size)

    model = Net().to(device)
    optim = torch.optim.Adam(model.parameters())
    lr_schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(optim,
                                                             patience=1,
                                                             verbose=True)
    criterion = torch.nn.CrossEntropyLoss()

    best_accuracy = 0
    for epoch in range(1, args.epochs + 1):
        model.train()
        train_loss, train_accuracy = do_epoch(model,
                                              train_loader,
                                              criterion,
                                              optim=optim)

        model.eval()
        with torch.no_grad():
            val_loss, val_accuracy = do_epoch(model,
                                              val_loader,
                                              criterion,
                                              optim=None)

        tqdm.write(
            f'EPOCH {epoch:03d}: train_loss={train_loss:.4f}, train_accuracy={train_accuracy:.4f} '
            f'val_loss={val_loss:.4f}, val_accuracy={val_accuracy:.4f}')

        if val_accuracy > best_accuracy:
            print('Saving model...')
            best_accuracy = val_accuracy
            torch.save(model.state_dict(), 'trained_models/source.pt')

        lr_schedule.step(val_loss)
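
do_epoch is not shown; a minimal sketch consistent with its call sites above (weights are updated only when optim is passed) and with the per-batch accuracy computation used in the previous example, assuming a module-level device:

def do_epoch(model, dataloader, criterion, optim=None):
    total_loss = 0.0
    total_accuracy = 0.0
    for x, y_true in dataloader:
        x, y_true = x.to(device), y_true.to(device)
        y_pred = model(x)
        loss = criterion(y_pred, y_true)
        if optim is not None:
            # training pass: backpropagate and update the weights
            optim.zero_grad()
            loss.backward()
            optim.step()
        total_loss += loss.item()
        total_accuracy += (y_pred.max(1)[1] == y_true).float().mean().item()
    return total_loss / len(dataloader), total_accuracy / len(dataloader)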
Example #11
    def __init__(self, train_loader, test_loader, config):
        self.config = config
        self.device = config.device

        self.train_loader = train_loader
        self.test_loader = test_loader

        self.n_epoch = config.n_epoch
        self.lr = config.lr
        self.gamma = config.gamma

        # self.start_epoch = 1
        self.start_itr = 1

        n_classes = len(self.train_loader.dataset.classes)
        self.model = Net(n_classes=n_classes).to(self.device)
        print(self.model)
        print('Initialized model...\n')

        self.optim = torch.optim.Adadelta(self.model.parameters(), self.lr)
        self.scheduler = StepLR(self.optim, step_size=1, gamma=self.gamma)

        # if not self.config.model_state_path == '':
        #     self._load_models(self.config.model_state_path)

        self.writer = SummaryWriter(log_dir=self.config.log_dir)
Example #12
def main():
    # Load model and data
    net = Net()
    trainloader, testloader = load_data()

    class CifarClient(fl.client.NumPyClient):
        def get_parameters(self):
            return [val.cpu().numpy() for _, val in net.state_dict().items()]

        def set_parameters(self, parameters):
            params_dict = zip(net.state_dict().keys(), parameters)
            state_dict = OrderedDict(
                {k: torch.tensor(v)
                 for k, v in params_dict})
            net.load_state_dict(state_dict, strict=True)

        def fit(self, parameters, config):
            self.set_parameters(parameters)
            train(net, trainloader, epochs=1)
            return self.get_parameters(), len(trainloader), {}

        def evaluate(self, parameters, config):
            self.set_parameters(parameters)
            loss = test(net, testloader)
            return float(loss), len(testloader), {}

    fl.client.start_numpy_client("[::]:8080", client=CifarClient())
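
The train helper called from fit is not shown; a minimal sketch in the spirit of the Flower CIFAR quickstart (the loss and optimizer choices are assumptions):

def train(net, trainloader, epochs):
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    net.train()
    for _ in range(epochs):
        for images, labels in trainloader:
            optimizer.zero_grad()
            loss = criterion(net(images), labels)
            loss.backward()
            optimizer.step()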
Example #13
def do_convert(args, logdir):
    # Load graph
    model = Net()

    df = NetDataFlow(hp.convert.data_path, hp.convert.batch_size)

    ckpt = '{}/{}'.format(logdir, args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir)
    session_inits = []
    if ckpt:
        session_inits.append(SaverRestore(ckpt))
    pred_conf = PredictConfig(
        model=model,
        input_names=get_eval_input_names(),
        output_names=get_eval_output_names(),
        session_init=ChainInit(session_inits))
    predictor = OfflinePredictor(pred_conf)

    audio, y_audio = convert(predictor, df)

    soundfile.write("a.wav", y_audio[0], 16000, format="wav", subtype="PCM_16")
    soundfile.write("b.wav", audio[0], 16000, format="wav", subtype="PCM_16")

    # Write the result
    tf.summary.audio('A', y_audio, hp.default.sr, max_outputs=hp.convert.batch_size)
    tf.summary.audio('B', audio, hp.default.sr, max_outputs=hp.convert.batch_size)

    writer = tf.summary.FileWriter(logdir)
    with tf.Session() as sess:
        summ = sess.run(tf.summary.merge_all())
    writer.add_summary(summ)
    writer.close()
Example #14
def main():
    print('Training Process\nInitializing...\n')
    config.init_env()

    val_dataset = view_data(config.view_net.data_root,
                            status=STATUS_TEST,
                            base_model_name=config.base_model_name)

    val_loader = DataLoader(val_dataset,
                            batch_size=config.view_net.test.batch_sz,
                            num_workers=config.num_workers,
                            shuffle=True)

    # create model
    net = Net(pretrained=True)
    net = net.to(device=config.device)
    net = nn.DataParallel(net)

    print(f'loading pretrained model from {config.view_net.ckpt_file}')
    checkpoint = torch.load(config.view_net.ckpt_file)
    net.module.load_state_dict(checkpoint['model'])

    with torch.no_grad():
        validate(val_loader, net)

    print('Test finished!')
Example #15
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Vec2Color Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1500, metavar='N',
                        help='number of epochs to train (default: 1500)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    file_names = ('capitalize', 'lower', 'upper', 'title')
    x_df = pd.concat([pd.read_csv('doc2color/data/{}.csv'.format(file_name)) for file_name in file_names])
    y_df = pd.concat([pd.read_csv('doc2color/data/rgb.csv')] * len(file_names))

    tensor_x = torch.stack([torch.from_numpy(np.array(i)) for i in x_df.values.astype(np.float32)])
    tensor_y = torch.stack([torch.from_numpy(np.array(i)) for i in y_df.values.astype(np.float32) / 255.0])

    x_train, x_test, y_train, y_test = train_test_split(
        tensor_x, tensor_y, test_size=0.01, random_state=args.seed)

    train_dataset = torch.utils.data.TensorDataset(x_train, y_train)
    train_loader = torch.utils.data.DataLoader(train_dataset,
        batch_size=args.batch_size, shuffle=True, **kwargs)

    test_dataset = torch.utils.data.TensorDataset(x_test, y_test)
    test_loader = torch.utils.data.DataLoader(test_dataset,
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()

    if args.save_model:
        torch.save(model.state_dict(), "doc2color/pt_objects/vec2color.pt")
Example #16
def main(size, epoch, lr, name, kind, mode):
    net = Net(name, kind, mode, size, epoch, lr)
    if mode == 'train':
        net.get_params_number()
        net.train()
    else:
        net.test()
Example #17
def create_model(args):
    model = Net()
    if args.start_epoch:
        path = os.path.join(args.checkpoints_dir,
                            f'checkpoint-{args.start_epoch}.pth')
        model.load_state_dict(torch.load(path))
    else:
        model.load_pretrained(args.ddr_weights)
    return model.to(args.device)
Example #18
def main():
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Net()

    trainer = Trainer(model, device=device, lr=args.lr, epoch=args.epoch)
    trainer.train_and_eval()
Example #19
def get_prediction(image_bytes):
    model = Net()
    model.load_state_dict(torch.load('model.pt', map_location='cpu'),
                          strict=False)
    model.eval()
    tensor = transform_image(image_bytes=image_bytes)
    outputs = F.softmax(model(tensor), dim=1)
    top_p, top_class = outputs.topk(1, dim=1)
    return top_p, top_class
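
A short usage sketch for get_prediction (the file name is hypothetical):

with open('example.jpg', 'rb') as f:
    top_p, top_class = get_prediction(f.read())
print(top_class.item(), top_p.item())  # predicted class index and its probability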
Example #20
def parse_hgr(hgr_file):
    with open(hgr_file) as hgr:
        num_nets, num_cells = integerify(hgr.readline().split())

        nets = []
        for i in range(num_nets):
            net = Net(i+1, integerify(hgr.readline().split()))
            nets.append(net)

        return num_cells, nets
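
parse_hgr depends on an integerify helper that is not shown; given how it is applied to the split header and net lines, a one-line sketch:

def integerify(tokens):
    # map string tokens to ints, e.g. ['3', '12'] -> [3, 12]
    return [int(tok) for tok in tokens]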
Example #21
def main():
    model = Net()
    model.load_state_dict(torch.load(checkpoint_path))
    model.eval()
    tar = prep_data()
    with torch.no_grad():
        output = model(tar)
    res = np.squeeze(output.cpu().numpy())
    num = np.argmax(res)  # index of the highest-scoring class
    print(int(num))
Example #22
def run():
    print("CUDA is available: {}".format(torch.cuda.is_available()))
    data_transform = transforms.Compose(
        [Rescale(250), CenterCrop(224),
         Normalize(), ToTensor()])

    # the loader splits the dataset into batches with the size defined by batch_size
    train_loader = initialize_train_loader(data_transform)
    test_loader = initialize_test_loader(data_transform)

    model_id = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
    # instantiate the neural network
    net = Net()
    net.to(device=device)
    summary(net, (1, 224, 224))
    # define the loss function using SmoothL1Loss
    criterion = nn.SmoothL1Loss()
    # define the params updating function using Adam
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    loss_logger = []

    for i in range(1, epochs + 1):
        model_name = 'model-{}-epoch-{}.pt'.format(model_id, i)

        # train all data for one epoch
        train(net, criterion, optimizer, i, train_loader, model_id,
              loss_logger)

        # evaluate the accuracy after each epoch
        evaluate(net, criterion, i, test_loader)

        # save a checkpoint every 5 epochs (i % 5 == 1 fires at epochs 1, 6, 11, ...)
        # https://discuss.pytorch.org/t/loading-a-saved-model-for-continue-training/17244/3
        # https://github.com/pytorch/pytorch/issues/2830
        # https://pytorch.org/tutorials/beginner/saving_loading_models.html
        if i % 5 == 1:
            torch.save(
                {
                    'epoch': i,
                    'model': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'loss_logger': loss_logger,
                }, model_dir + model_name)

    print("Finished training!")
    model_name = 'model-{}-final.pt'.format(model_id)
    torch.save(
        {
            'epoch': epochs,
            'model': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'loss_logger': loss_logger,
        }, model_dir + model_name)
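
The checkpoint dicts saved above can be restored to resume training; a minimal sketch (the file name is illustrative):

# restore the pieces saved in the checkpoint dict
checkpoint = torch.load(model_dir + 'model-<id>-epoch-5.pt')
net.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
loss_logger = checkpoint['loss_logger']
start_epoch = checkpoint['epoch'] + 1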
Example #23
def main():

    # load in a haar cascade classifier for detecting frontal faces
    face_cascade = cv2.CascadeClassifier(
        'detector_architectures/haarcascade_frontalface_default.xml')

    model = Net()
    model.load_state_dict(torch.load('./saved_models/keypoints_model_1.pt'))
    model.eval()

    show_webcam(model, face_cascade)
Example #24
 def initialize_networks(self):
     self.networks = []
     self.optimizers = []
     for i in range(self.n_models):
         model = Net()
         model.apply(self.init_weights)
         self.networks.append(model)
         self.optimizers.append(
             optim.SGD(model.parameters(),
                       lr=self.learning_rate,
                       momentum=self.momentum))
     self.criterion = nn.CrossEntropyLoss()
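
initialize_networks applies a self.init_weights hook through model.apply, which calls the hook once per submodule. A plausible sketch (the Xavier scheme is an assumption):

 @staticmethod
 def init_weights(m):
     # initialize Linear layers; other module types keep their defaults
     if isinstance(m, nn.Linear):
         nn.init.xavier_uniform_(m.weight)
         if m.bias is not None:
             nn.init.zeros_(m.bias)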
Example #25
def main():
    args = get_args()
    print(args)

    # set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # device = 'cpu'

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    net = Net(device=device, mode=args.target, target_mode=args.target_mode)
    net = net.eval()
    net = net.to(device)
    load_model(
        net,
        device,
        fullpath='trained_models/Net_continuous_trained/checkpoint_274.pth.tar'
    )

    imgs_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Real-Images/png'
    imgs_filelist = [
        os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)
        if img.endswith('.png')
    ]

    for i, img in enumerate(imgs_filelist):
        # x,x_paths, y, y_paths = data
        x = plt.imread(img)
        x = np.expand_dims(x, 0)
        x = np.transpose(x, (0, 3, 1, 2))
        x = x[:, :, 2:-2, 8:-8]
        x = torch.Tensor(x).to(device)
        with torch.no_grad():
            out = net(x)
        out = out.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        plt.figure(1)
        if args.target_mode == 'discrete':
            out = np.argmax(out, axis=1)
            out = out[0]
        # out = np.squeeze(out,0)
        out = (out - np.min(out)) / (np.max(out) - np.min(out))
        ax1 = plt.subplot(1, 3, 1)
        # x = (x + 1) / 2
        ax1.imshow(np.transpose(x[0], (1, 2, 0)))
        ax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1)
        ax3.imshow(out, cmap='jet')
        # plt.suptitle(label, fontsize="large")
        plt.show()
Example #26
def load_model(dir):
    """
    Loads models 
    :param dir: directory to load the models from
    :return: a list of all the models in the directory
    """
    networks = []
    for filename in os.listdir(dir):
        net = Net()
        net.load_state_dict(torch.load(os.path.join(dir, filename)))
        net.train(True)
        net.cuda()
        networks.append(net)
    return networks
Example #27
def get_model(args):
    ''' define model '''
    model = None
    if args.model == 'Net':
        model = Net()
    elif args.model == 'FCNet':
        model = FCNet()
    elif args.model == 'ConvNet':
        model = ConvNet()
    else:
        raise ValueError('The model is not defined!!')

    print('---Model Information---')
    print('Net:', model)
    print('Use GPU:', args.use_cuda)
    return model.to(args.device)
Example #28
    def __init__(self, args):
        self.args = args
        self.num_channels = NUM_CHANNELS

        if args.netType == 1:
            self.net = Net(self.num_channels, args)
        elif args.netType == 2:
            self.net = Net2(self.num_channels, args)
        else:
            raise ValueError('Unknown netType: {}'.format(args.netType))

        if args.cuda:
            self.net = self.net.cuda()

        self.load_dataset_from_folder()
        self.writer = SummaryWriter()
        self.unique_tok = str(time.time())
        self.init_weights()
Example #29
def main():
    # models
    net = Net(base=opt.base)
    net = nn.DataParallel(net).cuda()
    sdict = torch.load('./net.pth')
    net.load_state_dict(sdict)
    val_loader = torch.utils.data.DataLoader(ImageFiles(opt.img_dir,
                                                        opt.prior_dir,
                                                        size=256,
                                                        mean=mean,
                                                        std=std),
                                             batch_size=opt.b,
                                             shuffle=False,
                                             num_workers=4,
                                             pin_memory=True)
    validate(val_loader, net, 'results', opt.gt_dir)
Example #30
 def main(self):
     num_of_neurons = 151
     num_of_hidden_layers = 2
     learning_rate = 0.001
     epochs = 10
     args = {
         'num_of_neurons': num_of_neurons,
         'num_of_hidden_layers': num_of_hidden_layers,
         'learning_rate': learning_rate,
         'epochs': epochs,
     }
     loss = self.objective(args)
     print(loss)
     net = Net(num_of_neurones=num_of_neurons,
               num_of_hidden_layers=num_of_hidden_layers,
               num_of_inputs=self.hw_model.inputs,
               num_of_outputs=self.hw_model.outputs)
     net = net.to(self.device)
     print(self.test(net))