Beispiel #1
0
    args = parser.parse_args()
    print(args)

    config = json.loads(open(args.config, 'r').read())
    print(config)

    cap = cv2.VideoCapture(args.source)

    # [x_min, y_min, x_max, y_max]
    digit_areas = []
    for d in config['digit_pos']:
        digit_areas.append(
            [d[0], d[1], d[0] + config['d_width'], d[1] + config['d_height']])

    model = Net()
    model.load_state_dict(torch.load(config['model'], map_location='cpu'))
    model = model.eval()

    trigger = False

    while True:
        ret, img = cap.read()

        if ret:
            digit_img = []
            for idx, d in enumerate(digit_areas):
                tmp = img[d[1]:d[3], d[0]:d[2]]
                tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)

                tmp = cv2.resize(tmp,
Beispiel #2
0
    # n_step = 10
    use_cuda = True
    is_render = False
    save_model = True
    ###########################################

    device = torch.device("cuda:0" if use_cuda else "cpu")
    if use_cuda:
        try:
            mp.set_start_method('spawn')
        except:
            pass

    ######## Variable for A3C #################
    from SharedAdam import SharedAdam
    g_net = Net(s_dim, a_dim).share_memory()
    g_opt = SharedAdam(g_net.parameters(), lr=0.001)
    ###########################################

    for idx in range(num_worker):
        parent_conn, child_conn = Pipe()
        worker = MarioEnv(env_id,
                          idx,
                          child_conn,
                          queue,
                          s_dim,
                          a_dim,
                          g_net,
                          g_opt,
                          update_iter=10,
                          is_render=is_render,
Beispiel #3
0
BATCH_SIZE = 1000
MAX_LR = 1e-4

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


if __name__ == '__main__':
    # load datasets
    trainset = MNIST("../../Data/MNIST/test/")
    testset = MNIST("../../Data/MNIST/test/")

    trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
    testloader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)

    # init Model
    model = Net()
    model = model.to(device)

    # init optimizer, lr_sched & loss
    optimizer = Adam(model.parameters(), lr=MAX_LR)
    scheduler = OneCycleLR(optimizer, max_lr=MAX_LR, epochs=100, steps_per_epoch=len(trainloader))
    nllloss = nn.NLLLoss()

    log = tensorboardX.SummaryWriter("MNIST_SVM_3_3")

    # train
    for epoch in range(N_EPOCHS):
        mean_loss = []
        lables_cat = torch.empty(0, dtype=torch.long)
        output_cat = torch.empty(0, dtype=torch.long)
        for batch in tqdm(trainloader, desc=f"Train {epoch}", leave=False):
Beispiel #4
0
# %%
import torch
from model import Net

# Checkpoint locations (project-relative paths).
MODEL_PATH = 'model_cifar10.pth'
OPTIM_PATH = 'optim_cifar10.pt'
FULL_PATH = 'mdoel_full_cifar10.pth'  # NOTE(review): 'mdoel' typo kept — existing files may use this name

# Saving model parameters
# torch.save(net.state_dict(),MODEL_PATH)
# torch.save(optimizer.state_dict(),OPTIM_PATH)
# torch.save(net.state_dict(),FULL_PATH )

# # Save entire model
# torch.save(net, PATH)

# Load model.
# load_state_dict must be called on a model *instance* and mutates it in
# place (it returns key-matching info, not the model) — calling it on the
# Net class, as before, raised a TypeError / returned a non-model.
param_dict = torch.load(MODEL_PATH)
loaded_model = Net()
loaded_model.load_state_dict(param_dict)
#loaded_optim = Net.load_state_dict(torch.load(OPTIM_PATH))

# %%
# Inspect the loaded parameter tensors by name and shape.
for param in loaded_model.state_dict():
    print(param, loaded_model.state_dict()[param].size())

# NOTE(review): the original also iterated an undefined `loaded_optim`
# (its load above is commented out), which always raised NameError;
# re-enable both lines together if optimizer inspection is needed.
# for param in loaded_optim.state_dict():
#     print(param, loaded_optim.state_dict()[param].size())
Beispiel #5
0
    def build_model(self):
        """
        Build the model, loss criterion, and optimizer.

        Creates a freshly initialized ``Net`` (presumably FSRCNN — the
        commented checkpoint path suggests so, TODO confirm), an MSE
        criterion, and an SGD optimizer with per-layer learning rates:
        all biases and the final deconvolution layer train at
        ``0.1 * self.lr``, all other weights at ``self.lr`` (the setting
        from the official Caffe prototxt). Moves model and criterion to
        the GPU when ``self.GPU`` is set.
        """
        self.model = Net()
        self.model.weight_init()
        # self.model = torch.load('./logs/no7/x2/FSRCNN_model100.pth')

        self.criterion = nn.MSELoss()
        # self.criterion = HuberLoss(delta=0.9) # Huber loss
        # self.criterion = CharbonnierLoss(delta=0.0001) # Charbonnier Loss
        # NOTE(review): the seed is set *after* weight_init(), so it does
        # not make the weight initialization reproducible — confirm intent.
        torch.manual_seed(self.seed)

        if self.GPU:
            torch.cuda.manual_seed(self.seed)
            self.model.cuda()
            # Let cuDNN auto-tune convolution algorithms (fixed input sizes).
            cudnn.benchmark = True
            self.criterion.cuda()

        # Follow the setting in the official Caffe prototxt:
        # biases and the deconvolution layer use a 10x smaller learning rate.
        self.optimizer = optim.SGD(
            [
                {
                    'params': self.model.first_part[0].weight
                },  # feature extraction layer
                {
                    'params': self.model.first_part[0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[0][0].weight
                },  # shrinking layer
                {
                    'params': self.model.mid_part[0][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[1][0].weight
                },  # mapping layers
                {
                    'params': self.model.mid_part[1][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[2][0].weight
                },
                {
                    'params': self.model.mid_part[2][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[3][0].weight
                },
                {
                    'params': self.model.mid_part[3][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[4][0].weight
                },
                {
                    'params': self.model.mid_part[4][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[5][0].weight
                },  # expanding layer
                {
                    'params': self.model.mid_part[5][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.last_part.weight,
                    'lr': 0.1 * self.lr
                },  # deconvolution layer
                {
                    'params': self.model.last_part.bias,
                    'lr': 0.1 * self.lr
                }
            ],
            lr=self.lr,
            momentum=self.mom)
        # self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.mom)
        # self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[50, 75, 100], gamma=0.5)
        #  lr decay
        print(self.model)
Beispiel #6
0
def main(device=torch.device('cuda:0')):
    """Train the depth-estimation U-Net and plot loss/accuracy curves.

    Restores the latest checkpoint if one exists, trains for a fixed
    number of epochs, saves a checkpoint after every epoch, and finally
    plots the collected train/validation metrics via util.make_plot.
    """
    # Data loaders
    """
    if check_for_augmented_data("./data"):
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target", batch_size=config("cnn.batch_size"), augment=True
        )
    else:
        tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
            task="target",
            batch_size=config("cnn.batch_size"),
        )
    """
    # pathname = "data/nyu_depth.zip"
    # Small NYU-depth subset; swap for the full zip above when needed.
    pathname = "data/nyu_small.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(pathname,
                                                                       batch_size=util.config("unet.batch_size"))

    # Model
    model = Net()

    # TODO: define loss function, and optimizer
    learning_rate = util.config("unet.learning_rate")
    criterion = DepthLoss(0.1)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10
    #

    # print("Number of float-valued parameters:", util.count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    # NOTE(review): restored `stats` is passed unchanged to save_checkpoint
    # below; per-epoch metrics are kept in the running_* lists instead.
    print("Loading unet...")
    model, start_epoch, stats = util.restore_checkpoint(model, util.config("unet.checkpoint"))

    # axes = utils.make_training_plot()

    # Evaluate the randomly initialized model
    # evaluate_epoch(
    #     axes, tr_loader, va_loader, te_loader, model, criterion, start_epoch, stats
    # )
    # loss = criterion()

    # initial val loss for early stopping
    # prev_val_loss = stats[0][1]

    # Metric histories; one entry per evaluated epoch (including epoch 0).
    running_va_loss = []
    running_va_acc = []
    running_tr_loss = []
    running_tr_acc = []
    # TODO: define patience for early stopping
    # patience = 1
    # curr_patience = 0
    #
    # Baseline evaluation before any training this run.
    tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
    acc, loss = util.evaluate_model(model, va_loader, device)
    running_va_acc.append(acc)
    running_va_loss.append(loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)

    # Loop over the entire dataset multiple times
    # for epoch in range(start_epoch, config('cnn.num_epochs')):
    epoch = start_epoch
    # while curr_patience < patience:
    while epoch < number_of_epoches:
        # Train model
        util.train_epoch(tr_loader, model, criterion, optimizer)
        tr_acc, tr_loss = util.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = util.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
        # Evaluate model
        # evaluate_epoch(
        #     axes, tr_loader, va_loader, te_loader, model, criterion, epoch + 1, stats
        # )

        # Save model parameters
        util.save_checkpoint(model, epoch + 1, util.config("unet.checkpoint"), stats)

        # update early stopping parameters
        """
        curr_patience, prev_val_loss = early_stopping(
            stats, curr_patience, prev_val_loss
        )
        """

        epoch += 1
    print("Finished Training")
    # Save figure and keep plot open
    # utils.save_training_plot()
    # utils.hold_training_plot()
    util.make_plot(running_tr_loss, running_tr_acc, running_va_loss, running_va_acc)
    args.data + '/train_images', transform=data_transforms2),
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=10)

val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
    args.data + '/val_images', transform=data_transforms2),
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         num_workers=1)

### Neural Network and Optimizer
# We define neural net in model.py so that it can be reused by the evaluate.py script
from model import Net

model = Net()
model = model.cuda()  # Force CUDA usage

optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
dtype = torch.cuda.FloatTensor
dtype2 = torch.cuda.LongTensor


def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data).type(dtype), Variable(target).type(
            dtype2)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
Beispiel #8
0
def _search_worker(net_sizes, config, Ws, bs, X, Y, X_test, Y_test):
    """Train one candidate network and return its training result.

    The network skips random initialization and is seeded with deep
    copies of the supplied weight/bias matrices; all data arrays are
    copied as well so the caller's arrays are never mutated.
    """
    candidate = Net(net_sizes, config, init_theta=False)
    candidate.Ws = [weights.copy() for weights in Ws]
    candidate.bs = [biases.copy() for biases in bs]
    return candidate.train(
        X.copy(), Y.copy(), X_test.copy(), Y_test.copy(), silent=True)
Beispiel #9
0
def main(opt):
    """Train a graph classifier configured by the ``opt`` dict.

    Saves per-epoch metrics (as .npy files under ``log_path``) and the
    best-validation-loss model checkpoint (under ``output_path``).

    opt keys used: manual_seed, class_weight, gamma, path, batch_size,
    num_epochs, num_classes, gnn_layers, embed_dim, hidden_dim, jk_layer,
    process_step, dropout, lr, weight_decay.

    NOTE(review): relies on module-level ``device``, ``output_path`` and
    ``log_path`` defined elsewhere in the file.
    """
    # Seed all RNGs. Previously the seed was only *applied* when it had to
    # be drawn randomly, so a user-supplied manual_seed was silently
    # ignored; apply it unconditionally now.
    if opt['manual_seed'] is None:
        opt['manual_seed'] = random.randint(1, 10000)
        print('Random Seed: ', opt['manual_seed'])
    random.seed(opt['manual_seed'])
    torch.manual_seed(opt['manual_seed'])
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(opt['manual_seed'])

    # Optional per-class weighting for the loss.
    if opt['class_weight'] is not None:
        loss_weight = torch.FloatTensor(opt['class_weight']).to(device)
    else:
        loss_weight = None

    # Focal loss when a gamma is given, plain cross-entropy otherwise.
    if opt['gamma'] is not None:
        criterion = FocalLoss(alpha=loss_weight,
                              gamma=opt['gamma'],
                              reduction=True)
    else:
        criterion = CrossEntropyLoss(weight=loss_weight)

    # Sample ids: filenames with a 3-character suffix stripped
    # (presumably '.pt' files — confirm against GRDataset).
    files = [fname[:-3] for fname in os.listdir(opt['path'])]

    train_ids, val_ids = train_test_split(files, test_size=0.2)

    train_dataset = GRDataset(opt['path'], train_ids)
    val_dataset = GRDataset(opt['path'], val_ids)
    train_loader = DataLoader(train_dataset,
                              batch_size=opt['batch_size'],
                              shuffle=True,
                              drop_last=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=opt['batch_size'],
                            drop_last=True)

    # Per-epoch metric buffers.
    tr_losses = np.zeros((opt['num_epochs'], ))
    tr_accs = np.zeros((opt['num_epochs'], ))
    val_losses = np.zeros((opt['num_epochs'], ))
    val_accs = np.zeros((opt['num_epochs'], ))

    model = Net(num_classes=opt['num_classes'],
                gnn_layers=opt['gnn_layers'],
                embed_dim=opt['embed_dim'],
                hidden_dim=opt['hidden_dim'],
                jk_layer=opt['jk_layer'],
                process_step=opt['process_step'],
                dropout=opt['dropout'])
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=opt['lr'],
                                 weight_decay=opt['weight_decay'])
    best_val_loss = 1e6
    best_cnf = None  # set on the first improving epoch

    for epoch in range(opt['num_epochs']):
        s = time.time()

        # ---- training pass ----
        model.train()
        losses = 0
        acc = 0

        for i, data in enumerate(train_loader):
            data = data.to(device)
            optimizer.zero_grad()
            output = model(data)
            # print(data.y.squeeze())
            loss = criterion(output, data.y.squeeze())
            loss.backward()
            optimizer.step()

            y_true = data.y.squeeze().cpu().numpy()
            y_pred = output.data.cpu().numpy().argmax(axis=1)
            acc += accuracy_score(y_true, y_pred) * 100
            losses += loss.data.cpu().numpy()

        tr_losses[epoch] = losses / (i + 1)
        tr_accs[epoch] = acc / (i + 1)

        # ---- validation pass ----
        model.eval()
        v_losses = 0
        v_acc = 0
        y_preds = []
        y_trues = []

        for j, data in enumerate(val_loader):
            data = data.to(device)
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, data.y.squeeze())

            y_pred = output.data.cpu().numpy().argmax(axis=1)
            y_true = data.y.squeeze().cpu().numpy()
            y_trues += y_true.tolist()
            y_preds += y_pred.tolist()
            v_acc += accuracy_score(y_true, y_pred) * 100
            v_losses += loss.data.cpu().numpy()

        cnf = confusion_matrix(y_trues, y_preds)
        val_losses[epoch] = v_losses / (j + 1)
        val_accs[epoch] = v_acc / (j + 1)

        current_val_loss = v_losses / (j + 1)

        # Checkpoint whenever validation loss improves.
        if current_val_loss < best_val_loss:
            best_val_loss = current_val_loss
            best_cnf = cnf
            torch.save(model.state_dict(),
                       os.path.join(output_path, 'best_model.ckpt'))

        print(
            'Epoch: {:03d} | time: {:.4f} seconds\n'
            'Train Loss: {:.4f} | Train accuracy {:.4f}\n'
            'Validation Loss: {:.4f} | Validation accuracy {:.4f} | Best {:.4f}'
            .format(epoch + 1,
                    time.time() - s, losses / (i + 1), acc / (i + 1),
                    v_losses / (j + 1), v_acc / (j + 1), best_val_loss))
        print('Validation confusion matrix:')
        print(cnf)

    # Persist metric histories. The arrays were previously passed into
    # os.path.join instead of np.save (misplaced parenthesis), which
    # raised a TypeError before anything was written.
    np.save(os.path.join(log_path, 'train_loss.npy'), tr_losses)
    np.save(os.path.join(log_path, 'train_acc.npy'), tr_accs)
    np.save(os.path.join(log_path, 'val_loss.npy'), val_losses)
    np.save(os.path.join(log_path, 'val_acc.npy'), val_accs)
    np.save(os.path.join(log_path, 'confusion_matrix.npy'), best_cnf)
Beispiel #10
0
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    trainset = torchvision.datasets.CIFAR10(
        root=args.data_path,
        train=True,
        download=False,
        transform=transform,
    )
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=2)

    # define convolutional network
    net = Net()

    # set up pytorch loss /  optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        net.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
    )

    loss = 0
    # train the network
    for epoch in range(2):

        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
def train(train_samples,
          valid_samples,
          word2num,
          lr = 0.001,
          epoch = 5,
          use_cuda = False):
    """Train a Net classifier on the provided samples.

    ``word2num`` holds seven vocabularies, in order: statement, subject,
    speaker, speaker position, state, party, context. The model is sized
    from their lengths. Validates after every epoch and returns the
    trained model.
    """
    print('Training...')

    # Prepare training data
    print('  Preparing training data...')
    (statement_word2num, subject_word2num, speaker_word2num,
     speaker_pos_word2num, state_word2num, party_word2num,
     context_word2num) = word2num[:7]

    train_data = train_samples
    dataset_to_variable(train_data, use_cuda)
    valid_data = valid_samples
    dataset_to_variable(valid_data, use_cuda)

    # Build the model, one embedding table per vocabulary.
    print('  Constructing network model...')
    model = Net(len(statement_word2num),
                len(subject_word2num),
                len(speaker_word2num),
                len(speaker_pos_word2num),
                len(state_word2num),
                len(party_word2num),
                len(context_word2num))
    if use_cuda:
        model.cuda()

    # Start training
    print('  Start training')

    optimizer = optim.Adam(model.parameters(), lr=lr)
    model.train()

    display_interval = 2000
    step = 0

    for epoch_ in range(epoch):
        print(f'  ==> Epoch {epoch_} started.')
        random.shuffle(train_data)
        total_loss = 0

        # Samples are processed one at a time (batch size 1).
        for sample in train_data:
            optimizer.zero_grad()

            prediction = model(sample)
            label = Variable(torch.LongTensor([sample.label]))
            loss = F.cross_entropy(prediction, label)
            loss.backward()
            optimizer.step()

            step += 1
            if step % display_interval == 0:
                print(f'    ==> Iter: {step} Loss: {loss}')

            total_loss += loss.data.numpy()

        print(f'  ==> Epoch {epoch_} finished. Avg Loss: {total_loss/len(train_data)}')

        valid(valid_data, word2num, model)

    return model
    occ_likely = []
    for i in range(len(categories_train)):
        # setting the same occlusion likelihood for all classes
        occ_likely.append(likely)

    # load the CompNet initialized with ML and spectral clustering
    mix_models = getCompositionModel(device_ids,
                                     mix_model_path,
                                     layer,
                                     categories_train,
                                     compnet_type=compnet_type)
    net = Net(extractor,
              weights,
              vMF_kappa,
              occ_likely,
              mix_models,
              bool_mixture_bg=bool_mixture_model_bg,
              compnet_type=compnet_type,
              num_mixtures=num_mixtures,
              vc_thresholds=cfg.MODEL.VC_THRESHOLD)
    if bool_load_pretrained_model:
        net.load_state_dict(
            torch.load(pretrained_file,
                       map_location='cuda:{}'.format(
                           device_ids[0]))['state_dict'])

    net = net.cuda(device_ids[0])

    train_imgs = []
    train_masks = []
    train_labels = []
Beispiel #13
0
def main():
    """Federated-averaging driver.

    Parses CLI settings, builds the per-worker MNIST data loaders, loads
    one pre-trained model per checkpoint file found in ./modelset,
    averages their weights with FedAvg, and saves the resulting global
    model under ./modelresult.
    """
    # Training settings
    parser = argparse.ArgumentParser(
        description='PyTorch: Deep Mutual Learning')
    parser.add_argument('--worker-num',
                        type=int,
                        default=5,
                        metavar='N',
                        help='the number of wokers/nodes (default: 5)')
    parser.add_argument('--batch-size',
                        type=int,
                        default=128,
                        metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=50000,
                        metavar='N',
                        help='number of epochs to train (default: 50000)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    # Loader workers / pinned memory only help on GPU.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # One private dataset per worker plus a shared distillation split.
    [dataset_1, dataset_2, dataset_3, dataset_4, dataset_5,
     dataset_kl] = dataset_split()

    train_loader_1 = torch.utils.data.DataLoader(dataset_1,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)
    train_loader_2 = torch.utils.data.DataLoader(dataset_2,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)
    train_loader_3 = torch.utils.data.DataLoader(dataset_3,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)
    train_loader_4 = torch.utils.data.DataLoader(dataset_4,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)
    train_loader_5 = torch.utils.data.DataLoader(dataset_5,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)

    train_loader_share = torch.utils.data.DataLoader(
        dataset_kl, batch_size=args.batch_size, shuffle=True)

    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../../distillation-based/data',
        train=False,
        transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    worker_num = args.worker_num  # the number of workers/nodes

    model_path = os.path.join(os.getcwd(), "modelset")
    model_result = os.path.join(os.getcwd(), "modelresult")
    print(os.listdir(model_path))
    # One model per checkpoint file found in modelset/.
    length = len(os.listdir(model_path))

    model_set = []
    for _ in range(length):
        model = Net()
        model_set.append(model.to(device))

    # NOTE(review): optimizers are built for worker_num models while
    # model_set holds `length` models — confirm the two always agree.
    optimizer_set = []
    for worker_id in range(worker_num):
        optimizer_set.append(
            optim.SGD(model_set[worker_id].parameters(), lr=5e-3))

    # optimizer_set_kl = []
    # for worker_id in range(worker_num):
    #     optimizer_set_kl.append(optim.SGD(model_set[worker_id].parameters(), lr=5e-3))

    # scheduler_set = []
    # for worker_id in range(worker_num):
    #     scheduler_set.append(torch.optim.lr_scheduler.StepLR(optimizer_set[worker_id], step_size=32, gamma=0.9))  # multiply lr by gamma every 32 epochs

    model_global = Net().to(device)

    # for epoch in range(1, args.epochs + 1):
    #     w_global_copy = copy.deepcopy(model_global.state_dict())
    #     for worker_id in range(worker_num):
    #         model_set[worker_id].load_state_dict(w_global_copy)
    #     train_s(args, worker_num, model_set, device, [train_loader_1,train_loader_2, train_loader_3, train_loader_4, train_loader_5], optimizer_set, epoch)

    # Load every worker checkpoint and collect deep copies of the weights.
    # (renamed from `dict`, which shadowed the builtin)
    state_dicts = []
    for i in range(length):
        model_set[i].load_state_dict(
            torch.load(os.path.join(model_path,
                                    'distillation_mnist_iid_{}.pt'.format(i)),
                       map_location='cpu'))
        state_dicts.append(copy.deepcopy(model_set[i].state_dict()))

    # Federated averaging of all worker weights into the global model.
    w_global = FedAvg(state_dicts)
    model_global.load_state_dict(w_global)
    model_global = model_global.to(device)

    if (args.save_model):
        torch.save(model_global.state_dict(),
                   os.path.join(model_result, "FedAvg_mnist_iid.pt"))
Beispiel #14
0
def train():
    """Train the model with a (100 - PSNR) objective.

    Checkpoints every ``args.save_every`` epochs and once at the end,
    then plots and saves the per-iteration loss curve.
    """
    args = parser.parse_args()
    print("Number of GPUS available" + str(torch.cuda.device_count()))
    model = Net().cuda()
    #model.load_state_dict(torch.load("/home/yzm/pyproject/hw4/chkpt/guassian/model_ffinal_epoch.state"))
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0001)

    #useless
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.1)

    dataset = DatasetFromFolder()

    dataloader = DataLoader(dataset=dataset,
                            batch_size=args.batch_size,
                            shuffle=True)
    print(
        f'start with dataloader:{len(dataloader)},batchsize of {args.batch_size}'
    )
    # (a second, redundant parser.parse_args() call was removed here)
    model.train()
    iterationnum = 0
    losslist = []
    iteration = []
    for ei in range(0, args.Epochs):
        train_loss = 0

        for x, target in dataloader:
            iterationnum = iterationnum + 1
            x = x.cuda()
            target = target.cuda()
            y = model(x)
            # Maximize PSNR by minimizing (100 - PSNR).
            loss = (100 - PSNR(y, target)).cuda()
            iteration.append(iterationnum)
            # Store a plain float, not the tensor: appending the CUDA
            # tensor kept the whole autograd graph alive for every step.
            losslist.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("{%3d}/{%3d} -------loss:{%3f}" %
                  (ei, args.Epochs - 1, loss.item()))

            train_loss += loss.item() / len(dataloader)
        scheduler.step()
        print(f"----------------{ei}epoch's loss is{train_loss}")

        if ei % args.save_every == args.save_every - 1:
            print(f"save model model_{ei}_epoch")
            torch.save(model.state_dict(),
                       f"../chkpt/{args.exp_name}/model_{ei}_epoch.state")
    torch.save(model.state_dict(),
               f"../chkpt/{args.exp_name}/model_final_epoch.state")
    plt.figure()
    plt.plot(iteration, losslist, label='loss')
    plt.draw()
    # Save before show(): show() blocks and closes the figure on exit,
    # so saving afterwards wrote an empty image.
    plt.savefig("/home/yzm/pyproject/hw4/experience/loss.jpg")
    plt.show()
Beispiel #15
0
# Validate the --cuda flag and fail fast when no GPU is available.
cuda = opt.cuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

# Seed CPU (and GPU) RNGs for reproducibility.
torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
# NOTE(review): get_training_set/get_test_set are defined elsewhere;
# presumably they build super-resolution pairs for this upscale factor.
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
model = Net(upscale_factor=opt.upscale_factor)
criterion = nn.MSELoss()

# Move model and loss to the GPU when requested.
if cuda:
    model = model.cuda()
    criterion = criterion.cuda()

optimizer = optim.Adam(model.parameters(), lr=opt.lr)


def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = Variable(batch[0]), Variable(batch[1])
        if cuda:
            input = input.cuda()
Beispiel #16
0
            captions = glob.glob(str(batch) + '/gt/*.txt')
            if len(captions) < 10:
                continue
            if data_map.get(str(cat)) is None:
                data_map[str(cat)]['images'] = sorted(images)
                data_map[str(cat)]['captions'] = sorted(captions)
            else:
                data_map[str(cat)]['images'] = data_map[str(
                    cat)]['images'] + sorted(images)
                data_map[str(cat)]['captions'] = data_map[str(
                    cat)]['captions'] + sorted(captions)

    input_size = (300, 400, 3)
    SEED = 38

    net = Net(input_size, SEED)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    transform = transforms.Compose([
        transforms.Resize((300, 400)),
        transforms.ToTensor(),
        transforms.RandomErasing(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    dataset = ActionsDataset(data_map, transform)

    BATCH_SIZE = 8
    VAL_SPLIT = .2
Beispiel #17
0
class LFTReward:
    """Latent-space reward based on distance to pre-rendered goal images.

    Both the current observation and a fixed set of goal images are
    embedded with a frozen encoder ``Net``; each frame is scored by how
    close its embedding lies to the goal embeddings.
    """

    def __init__(self):
        # Prefer the GPU when one is present; otherwise fall back to CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # self.device = torch.device('cpu')
        encoder = Net().to(self.device).double()
        # Checkpoint may have been saved on GPU; remap its weights to CPU.
        encoder.load_state_dict(torch.load(model_path, map_location={'cuda:0': 'cpu'}))
        # self.encoder_net.load_state_dict(torch.load(model_path))
        encoder.eval()
        self.encoder_net = encoder

        # Goal embeddings never change, so compute them once up front.
        goal_imgs = torch.tensor(self.load_data()).to(self.device)
        self.img_g_z = self.encoder_net(goal_imgs)

    @staticmethod
    def distance(x1, x2):
        """Element-wise squared absolute difference of two tensors."""
        return torch.pow(torch.abs(x1 - x2).contiguous(), 2)

    @staticmethod
    def load_data():
        """Load the goal images from disk, scaled into [0, 1]."""
        return [
            cv2.imread(f'goal_abstract/{i}/bw_{i}.png') / 255.0
            for i in range(1, folder_size + 1)
        ]

    @staticmethod
    def rgb2gray(rgb):
        """Collapse the first three channels to grayscale (ITU-R weights)."""
        return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

    def reward(self, s, j):
        """Return per-frame rewards for a batch of observations ``s``.

        ``s`` is expected to hold 84x84 single-channel frames scaled in
        [0, 255] -- TODO confirm against the caller.  ``j`` is unused.
        """
        # gray = self.rgb2gray(s)
        # resized_img = cv2.resize(gray, (256, 256))

        frames = s.cpu().detach().numpy()
        frames = frames / 255.0

        # Replicate the single channel into a (N, 84, 84, 3) RGB stack.
        stacked = cv2.merge((frames, frames, frames))
        stacked = stacked.reshape((-1, 84, 84, 3))

        # Probe one fixed pixel to decide whether the goal wall is visible.
        probe = stacked[0, 10, 60, 0]
        goal_wall = (probe == 0) or (abs(probe - 0.75) < 0.01)

        batch = torch.tensor(stacked).to(self.device).double()
        return self.calc_reward1(self.encoder_net(batch), goal_wall)

    def calc_reward1(self, img_s_z, goal_wall):
        """Threshold reward: +50 per goal embedding that is close enough.

        ``goal_wall`` is currently not used in the scoring.
        """
        rewards = []
        for frame_emb in img_s_z:
            # Clamped squared distance from this frame to every goal.
            dists = [
                np.sum(torch.clamp(self.distance(frame_emb, goal_emb),
                                   max=5).cpu().detach().numpy())
                for goal_emb in self.img_g_z
            ]

            score = 0
            if dists[0] < 2.5:
                score += 50
            if dists[1] < 3:
                score += 50
            # if dists[2] < 3:
            #     score -= 2
            rewards.append(score)

        return rewards

    def calc_reward2(self, img_s_z):
        """Smooth reward: sum of exp(-distance / 10) over all goals."""
        rewards = []
        for frame_emb in img_s_z:
            frame_total = 0
            for goal_emb in self.img_g_z:
                d = torch.clamp(self.distance(frame_emb, goal_emb),
                                max=5).cpu().detach().numpy()
                frame_total += np.sum(np.exp(-1 * d / 10.0))
            rewards.append(frame_total)

        return rewards
def main(device, tr_loader, va_loader, te_loader, modelSelection):
    """Train a CNN for depth estimation and show training plots.

    Args:
        device: torch.device to train on.
        tr_loader: DataLoader over the training split.
        va_loader: DataLoader over the validation split.
        te_loader: DataLoader over the test split (currently unused here).
        modelSelection: case-insensitive architecture key; one of
            'res50', 'dense121', 'mobv2', 'dense169', 'mob', 'squeeze'.

    Raises:
        ValueError: if ``modelSelection`` is not a known architecture.
    """
    # Dispatch table instead of an if/elif chain.  This also fixes a bug:
    # the old 'squeeze' branch called Squeeze() without assigning the
    # result, so `model` was unbound and model.to(device) crashed.
    factories = {
        'res50': Res50,
        'dense121': Dense121,
        'mobv2': Mob_v2,
        'dense169': Dense169,
        'mob': Net,
        'squeeze': Squeeze,
    }
    key = modelSelection.lower()
    if key not in factories:
        # Raise instead of `assert False` so the check survives `python -O`.
        raise ValueError('Wrong type of model selection string!')
    model = factories[key]().to(device)

    # Loss and optimizer; the learning rate comes from the per-model config.
    learning_rate = utils.config(modelSelection + ".learning_rate")
    criterion = DepthLoss(0.1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    number_of_epoches = 10

    # Attempts to restore the latest checkpoint if one exists.
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))

    # Resume the running histories from the checkpoint when present.
    running_va_loss = stats['va_loss'] if 'va_loss' in stats else []
    running_va_acc = stats['va_err'] if 'va_err' in stats else []
    running_tr_loss = stats['tr_loss'] if 'tr_loss' in stats else []
    running_tr_acc = stats['tr_err'] if 'tr_err' in stats else []

    # Record a baseline evaluation before any (further) training.
    tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
    va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
    running_va_acc.append(va_acc)
    running_va_loss.append(va_loss)
    running_tr_acc.append(tr_acc)
    running_tr_loss.append(tr_loss)

    # `stats` shares the list objects above, so appending to them below
    # keeps every subsequent checkpoint up to date automatically.
    stats = {
        'va_err': running_va_acc,
        'va_loss': running_va_loss,
        'tr_err': running_tr_acc,
        'tr_loss': running_tr_loss,
    }

    # Train for the remaining epochs, checkpointing and evaluating each one.
    for epoch in range(start_epoch, number_of_epoches):
        utils.train_epoch(device, tr_loader, model, criterion, optimizer)
        utils.save_checkpoint(model, epoch + 1,
                              utils.config(modelSelection + ".checkpoint"),
                              stats)
        tr_acc, tr_loss = utils.evaluate_model(model, tr_loader, device)
        va_acc, va_loss = utils.evaluate_model(model, va_loader, device)
        running_va_acc.append(va_acc)
        running_va_loss.append(va_loss)
        running_tr_acc.append(tr_acc)
        running_tr_loss.append(tr_loss)
    print("Finished Training")
    utils.make_plot(running_tr_loss, running_tr_acc, running_va_loss,
                    running_va_acc)
Beispiel #19
0
    torchvision.transforms.Resize((128, 64)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
])
# Query/gallery loaders for re-ID feature extraction.  shuffle=False keeps
# the batch order deterministic so features line up with the dataset order.
queryloader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder(
    query_dir, transform=transform),
                                          batch_size=64,
                                          shuffle=False)
galleryloader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder(
    gallery_dir, transform=transform),
                                            batch_size=64,
                                            shuffle=False)

# net definition
# reid=True: presumably switches the net to return embeddings rather than
# class logits -- confirm against the Net implementation.
net = Net(reid=True)
assert os.path.isfile(
    "/home/ganhaiyang/SlowFast-Network-Detection/deep/checkpoint/ckpt.t7"
), "Error: no checkpoint file found!"
print('Loading from checkpoint/ckpt.t7')
checkpoint = torch.load(
    "/home/ganhaiyang/SlowFast-Network-Detection/deep/checkpoint/ckpt.t7")
net_dict = checkpoint['net_dict']
net.load_state_dict(net_dict)
net.eval()
net.to(device)

# compute features
# Empty accumulators; batches of features/labels get concatenated onto them.
query_features = torch.tensor([]).float()
query_labels = torch.tensor([]).long()
gallery_features = torch.tensor([]).float()
Beispiel #20
0
])
# Query/gallery loaders for re-ID feature extraction (deterministic order).
queryloader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(query_dir, transform=transform),
    batch_size=64, shuffle=False
)
galleryloader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(gallery_dir, transform=transform),
    batch_size=64, shuffle=False
)
#print(len(queryloader.dataset.classes)) #750 classes (does not have to equal train_set_class=751; Market-1501 contains 750+751=1501 classes)
#print(len(galleryloader.dataset.classes)) #752 classes (the extra classes are 0 and -1, which contain unwanted images)
#print(len(queryloader)) #53
#print(len(galleryloader)) #509

# net definition
net = Net(reid=True)
assert os.path.isfile("./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
print('Loading from checkpoint/ckpt.t7')
# original: checkpoint = torch.load("./checkpoint/ckpt.t7", map_location=torch.device('cpu'))
# map_location is needed so the checkpoint also loads on machines without CUDA
checkpoint = torch.load("./checkpoint/ckpt.t7", map_location=torch.device('cpu'))
net_dict = checkpoint['net_dict']
net.load_state_dict(net_dict, strict=False)
net.eval()  # disables train-time behaviour such as dropout; net.train() switches it back on
net.to(device)

# compute features
# Empty accumulators; batches of features/labels get concatenated onto them.
query_features = torch.tensor([]).float()
query_labels = torch.tensor([]).long()
gallery_features = torch.tensor([]).float()
gallery_labels = torch.tensor([]).long()
Beispiel #21
0
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import torchvision.models as models
import matplotlib.pyplot as plt
import numpy as np
import cv2
from model import Net

# Run on GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

netmodel = Net()

# The checkpoint may have been saved from an nn.DataParallel-wrapped model,
# whose keys carry a "module." prefix; load on CPU and normalise keys below.
state_dict = torch.load("mnistcnn.pth.tar", map_location='cpu')
from collections import OrderedDict

new_state_dict = OrderedDict()
for k, v in state_dict.items():
    name = k
    if k[0:7] == "module.":
        # Strip the "module." prefix so keys match the bare model.
        name = k[7:]
    new_state_dict[name] = v
netmodel.load_state_dict(new_state_dict)
netmodel.eval()


def visualmodle(initimagefile, netmodel, layer, channel):
Beispiel #22
0
def generate_tensors(net: Net):
    """Yield one protobuf ``Tensor`` message per weight in *net*."""
    for weight_name, weight_dtype, weight_data in net.weights_iter():
        logger.debug('get tensor: %s (%s)', weight_name, weight_dtype)
        yield train_pb2.Tensor(
            name=weight_name, data=weight_data, dtype=weight_dtype)
Beispiel #23
0
class solver(object):
    """Training driver for an FSRCNN-style super-resolution network.

    Owns the model, loss, optimizer and a Tensorboard-style ``Logger``;
    ``validate`` runs the full train/evaluate/log loop over ``n_epochs``.
    """

    def __init__(self, config, train_loader, set5_h5_loader, set14_h5_loader,
                 set5_img_loader, set14_img_loader):
        # Model/criterion/optimizer are created later in build_model().
        self.model = None
        self.lr = config.lr
        self.batch_size = config.batch_size
        self.mom = config.mom
        self.logs = config.logs
        self.n_epochs = config.n_epochs
        self.criterion = None
        self.optimizer = None
        self.scheduler = None
        self.GPU = torch.cuda.is_available()
        self.seed = config.seed
        self.train_loader = train_loader
        self.set5_h5_loader = set5_h5_loader
        self.set14_h5_loader = set14_h5_loader
        self.set5_img_loader = set5_img_loader
        self.set14_img_loader = set14_img_loader
        self.logger = Logger(self.logs + '/')
        # Scalar summaries pushed to the logger once per epoch.
        self.info = {'loss': 0, 'PSNR for Set5': 0, 'PSNR for Set14': 0}
        # Parameter snapshots used to visualise how the filters evolve.
        self.final_para = []
        self.initial_para = []
        self.graph = True
        self.to_tensor = ToTensor()

        if not os.path.isdir(self.logs):
            os.makedirs(self.logs)

    def build_model(self):
        """
        Build the model, loss and optimizer.  Biases and the final
        deconvolution layer are trained at 0.1x the base learning rate.
        """
        self.model = Net()
        self.model.weight_init()
        # self.model = torch.load('./logs/no7/x2/FSRCNN_model100.pth')

        self.criterion = nn.MSELoss()
        # self.criterion = HuberLoss(delta=0.9) # Huber loss
        # self.criterion = CharbonnierLoss(delta=0.0001) # Charbonnier Loss
        torch.manual_seed(self.seed)

        if self.GPU:
            torch.cuda.manual_seed(self.seed)
            self.model.cuda()
            cudnn.benchmark = True
            self.criterion.cuda()

        # follow the setting in the official caffe prototxt
        self.optimizer = optim.SGD(
            [
                {
                    'params': self.model.first_part[0].weight
                },  # feature extraction layer
                {
                    'params': self.model.first_part[0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[0][0].weight
                },  # shrinking layer
                {
                    'params': self.model.mid_part[0][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[1][0].weight
                },  # mapping layers
                {
                    'params': self.model.mid_part[1][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[2][0].weight
                },
                {
                    'params': self.model.mid_part[2][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[3][0].weight
                },
                {
                    'params': self.model.mid_part[3][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[4][0].weight
                },
                {
                    'params': self.model.mid_part[4][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.mid_part[5][0].weight
                },  # expanding layer
                {
                    'params': self.model.mid_part[5][0].bias,
                    'lr': 0.1 * self.lr
                },
                {
                    'params': self.model.last_part.weight,
                    'lr': 0.1 * self.lr
                },  # deconvolution layer
                {
                    'params': self.model.last_part.bias,
                    'lr': 0.1 * self.lr
                }
            ],
            lr=self.lr,
            momentum=self.mom)
        # self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.mom)
        # self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[50, 75, 100], gamma=0.5)
        #  lr decay
        print(self.model)

    def save(self, epoch):
        """
        Save the full model (not just the state dict) to the log dir.
        :param epoch: number of current epoch
        """
        model_out_path = self.logs + '/FSRCNN_model' + str(epoch) + '.pth'
        torch.save(self.model, model_out_path)
        print("Checkpoint saved to {}".format(model_out_path))

    def train(self):
        """
        Train for one epoch and store the mean loss in ``self.info``.
        """
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.train_loader):
            if self.GPU:
                data, target = data.cuda(), target.cuda()

            # if self.graph: # plot the network
            #     graph = make_dot(self.model(data))
            #     graph.view()
            #     self.graph = False

            self.optimizer.zero_grad()
            model_out = self.model(data)
            loss = self.criterion(model_out, target)
            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            # Show the running mean loss over the batches seen so far.
            progress_bar(batch_num, len(self.train_loader),
                         'Loss: %.5f' % (train_loss / (batch_num + 1)))

        self.info['loss'] = train_loss / len(self.train_loader)

    def test_set5_patch(self):
        """Average PSNR over Set5 patches (h5 loader); the PSNR formula
        assumes pixel intensities in [0, 1] (peak value 1)."""
        self.model.eval()
        avg_psnr = 0
        for batch_num, (data, target) in enumerate(self.set5_h5_loader):
            if self.GPU:
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data, target

            prediction = self.model(data)
            mse = self.criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(self.set5_h5_loader),
                         'PSNR: %.4fdB' % (avg_psnr / (batch_num + 1)))

        self.info['PSNR for Set5 patch'] = avg_psnr / len(self.set5_h5_loader)

    def test_set14_patch(self):
        """Average PSNR over Set14 patches (h5 loader); same PSNR
        convention as ``test_set5_patch``."""
        self.model.eval()
        avg_psnr = 0
        for batch_num, (data, target) in enumerate(self.set14_h5_loader):
            if self.GPU:
                data, target = data.cuda(), target.cuda()

            prediction = self.model(data)
            mse = self.criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(self.set14_h5_loader),
                         'PSNR: %.4fdB' % (avg_psnr / (batch_num + 1)))

        self.info['PSNR for Set14 patch'] = avg_psnr / len(
            self.set14_h5_loader)

    def test_set5_img(self):
        """
        Get PSNR value for test set Set 5 images, and write to Tensorboards logs.
        """
        self.model.eval()
        avg_psnr = 0
        for batch_num, (data, target) in enumerate(self.set5_img_loader):
            # Crop a 6-pixel border from the target and prediction before
            # scoring -- presumably to exclude boundary artifacts; confirm
            # against the evaluation protocol being replicated.
            target = target.numpy()
            target = target[:, :, 6:target.shape[2] - 6, 6:target.shape[3] - 6]
            # target = torch.from_numpy(target)
            if self.GPU:
                data, target = data.cuda(), torch.from_numpy(target).cuda()
            else:
                data, target = data, torch.from_numpy(target)

            prediction = self.model(data)
            prediction = prediction.data.cpu().numpy()
            prediction = prediction[:, :, 6:prediction.shape[2] - 6,
                                    6:prediction.shape[3] - 6]
            if self.GPU:
                prediction = torch.from_numpy(prediction).cuda()
            else:
                prediction = torch.from_numpy(prediction)
            mse = self.criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(self.set5_img_loader),
                         'PSNR: %.4fdB' % (avg_psnr / (batch_num + 1)))

        self.info['PSNR for Set5'] = avg_psnr / len(self.set5_img_loader)

    def test_set14_img(self):
        """
        Get PSNR value for test set Set 14 images, and write to Tensorboards logs.
        """
        self.model.eval()
        avg_psnr = 0
        for batch_num, (data, target) in enumerate(self.set14_img_loader):
            # Same 6-pixel border crop as test_set5_img.
            target = target.numpy()
            target = target[:, :, 6:target.shape[2] - 6, 6:target.shape[3] - 6]
            # target = torch.from_numpy(target)
            if self.GPU:
                data, target = data.cuda(), torch.from_numpy(target).cuda()
            else:
                data, target = data, torch.from_numpy(target)

            prediction = self.model(data)
            prediction = prediction.data.cpu().numpy()
            prediction = prediction[:, :, 6:prediction.shape[2] - 6,
                                    6:prediction.shape[3] - 6]
            if self.GPU:
                prediction = torch.from_numpy(prediction).cuda()
            else:
                prediction = torch.from_numpy(prediction)
            mse = self.criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(self.set14_img_loader),
                         'PSNR: %.4fdB' % (avg_psnr / (batch_num + 1)))

        self.info['PSNR for Set14'] = avg_psnr / len(self.set14_img_loader)

    def predict(self, epoch):
        """
        Upscale a fixed sample image and write the result to the logs.
        :param epoch: the current epoch number
        """
        self.model.eval()
        butterfly = load_img('./bmp/butterfly86.bmp')
        # Add a batch dimension: (C, H, W) -> (1, C, H, W).
        butterfly = torch.unsqueeze(self.to_tensor(butterfly), 0)
        if self.GPU:
            data = butterfly.cuda()
        else:
            data = butterfly
        prediction = self.model(data).data.cpu().numpy()[0][0]
        self.logger.image_summary('prediction', prediction, epoch)
        # imsave(self.logs + '/prediction_' + str(epoch) + '.bmp', prediction)

    def plot_fig(self, tensor, filename, num_cols=8):
        """
        Plot the parameters to images.
        :param tensor: the tensor need to plot
        :param filename: the filename of the saved images
        :param num_cols: number of columns of filters in the images
        """
        num_kernels = tensor.shape[0]
        num_rows = ceil(num_kernels / num_cols)
        fig = plt.figure(figsize=(num_cols, num_rows))
        for i in range(tensor.shape[0]):
            # One subplot per filter; only the first input channel is shown.
            ax1 = fig.add_subplot(num_rows, num_cols, i + 1)
            ax1.imshow(tensor[i][0], norm=Normalize())
            ax1.axis('off')
            ax1.set_xticklabels([])
            ax1.set_yticklabels([])

        plt.subplots_adjust(wspace=0.1, hspace=0.1)
        plt.savefig(self.logs + '/' + filename + '.png')
        # plt.show()

    def get_para(self):
        """
        Return the parameters in the model as a list of numpy arrays.
        """
        para = []
        for parameter in self.model.parameters():
            para.append(parameter.data.cpu().numpy())
        return para

    def validate(self):
        """
        Main function to run solver: build, train, evaluate and log.
        """
        self.build_model()

        # Snapshot the initial weights so later epochs can log the delta.
        # Index 0 is the first conv weight; index -2 is the last layer's
        # weight (the final entry is its bias).
        self.initial_para = self.get_para()
        for epoch in range(1, self.n_epochs + 1):
            if epoch == 1:  # log initial para
                self.logger.histo_summary('initial fisrt layer para',
                                          self.initial_para[0], epoch)
                self.logger.histo_summary('initial last layer para',
                                          self.initial_para[-2], epoch)
                self.plot_fig(self.initial_para[0], 'first_layer_initial')
                self.plot_fig(self.initial_para[-2], 'last_layer_initial')
            elif (epoch % 5 == 0) or (epoch == self.n_epochs):  # log para
                # Histogram of how far the weights have moved from init.
                self.logger.histo_summary(
                    'fisrt layer para',
                    self.final_para[0] - self.initial_para[0], epoch)
                self.logger.histo_summary(
                    'last layer para',
                    self.final_para[-2] - self.initial_para[-2], epoch)
            print("\n===> Epoch {} starts:".format(epoch))

            self.train()

            # Double the batch size every 2 epochs until it reaches the
            # configured maximum.  NOTE(review): this mutates DataLoader
            # internals (batch_sampler.batch_size) and relies on PyTorch
            # implementation details -- verify on the PyTorch version used.
            if (epoch % 2
                    == 0) and (self.train_loader.batch_size < self.batch_size):
                self.train_loader.batch_size *= 2
                self.train_loader.batch_sampler.batch_size *= 2

            # print('Testing Set5 patch:')
            # self.test_set5_patch()
            # print('Testing Set14 patch:')
            # self.test_set14_patch()
            print('Testing Set5:')
            with torch.no_grad():
                self.test_set5_img()
            print('Testing Set14:')
            with torch.no_grad():
                self.test_set14_img()
            # self.scheduler.step(epoch)
            self.final_para = self.get_para()

            # Push this epoch's scalar summaries to the logger.
            for tag, value in self.info.items():
                self.logger.scalar_summary(tag, value, epoch)

            self.predict(epoch)
            if (epoch % 50 == 0) or (epoch == self.n_epochs) or (epoch == 1):
                if epoch != 1:
                    self.save(epoch)
                # plot the para
                self.plot_fig(self.final_para[0] - self.initial_para[0],
                              '/first_diff_' + str(epoch))
                self.plot_fig(self.final_para[-2] - self.initial_para[-2],
                              '/last_diff_' + str(epoch))
                self.plot_fig(self.final_para[0], '/first_' + str(epoch))
                self.plot_fig(self.final_para[-2], '/last_' + str(epoch))
Beispiel #24
0
class Detector(object):
    """Runs a trained anchor-based detection ``Net`` and decodes its raw
    outputs into scored, NMS-filtered bounding boxes."""

    def __init__(self,
                 model,
                 image_size=Config.IMAGE_SIZE,
                 threshold=Config.PREDICTION_THRESHOLD):
        # Accept either a checkpoint name/path or an already-built model.
        if type(model) == str:
            checkpoint = torch.load(seek_model(model))
            self.model = Net().to(device)
            self.model.load_state_dict(checkpoint['state_dict'], strict=True)
        else:
            self.model = model
        self.model.eval()
        self.threshold = threshold
        self.image_size = image_size

        # NOTE(review): unused local; anchors are rebuilt per call below.
        anchor_configs = (
            Config.ANCHOR_STRIDE,
            Config.ANCHOR_SIZE,
        )

    def convert_predictions(self, predictions, path, anchors):
        """Decode per-anchor predictions into NMS-filtered boxes.

        Each row of ``predictions`` holds 4 box offsets followed by class
        logits.  Returns rows of converted box coordinates plus score and
        class, or None when nothing survives filtering.  ``path`` is
        unused here.
        """
        # get sorted indices by score

        # Best class per anchor; drop anchors predicted as background (0).
        scores, klass = torch.max(softmax(predictions[:, 4:]), dim=1)
        inds = klass != 0

        scores, klass, predictions, anchors = \
            scores[inds], klass[inds], predictions[inds], anchors[inds]

        if len(scores) == 0:
            return None

        # Sort by confidence, keep only the top 200 candidates.
        scores, inds = torch.sort(scores, descending=True)
        klass, predictions, anchors = klass[inds], predictions[inds], anchors[
            inds]

        # inds = scores > self.threshold
        # scores, klass, predictions, anchors = \
        #     scores[inds], klass[inds], predictions[inds], anchors[inds]

        scores, klass, predictions, anchors = \
            scores[:200], klass[:200], predictions[:200], anchors[:200]

        if len(predictions) == 0:
            return None
        anchors = anchors.to(device).float()

        # Decode offsets relative to anchors: centers shifted by a fraction
        # of the anchor size, sizes scaled exponentially.
        x = (predictions[:, 0] * anchors[:, 2] + anchors[:, 0])
        y = (predictions[:, 1] * anchors[:, 3] + anchors[:, 1])
        w = (torch.exp(predictions[:, 2]) * anchors[:, 2])
        h = (torch.exp(predictions[:, 3]) * anchors[:, 3])

        bounding_boxes = torch.stack((x, y, w, h), dim=1).cpu().data.numpy()
        bounding_boxes = change_coordinate_inv(bounding_boxes)

        # Append score and class columns: [box..., score, class].
        scores = scores.cpu().data.numpy()
        klass = klass.cpu().data.numpy()
        bboxes_scores = np.hstack(
            (bounding_boxes, np.array(list(zip(*(scores, klass))))))

        # nms
        keep = nms(bboxes_scores)
        return bboxes_scores[keep]

    def forward(self, batched_data):
        """predict with pytorch dataset output

        Args:
            batched_data (tensor): yield by the dataset
        Returns: predicted coordinate and score
        """
        # NHWC -> NCHW for the network.
        images = batched_data[0].permute(0, 3, 1, 2).to(device).float()
        # Regroup the per-layer outputs so each element covers one image.
        predictions = list(zip(*list(self.model(images))))
        result = []

        for i, prediction in enumerate(predictions):
            prediction = list(prediction)
            anchors = []
            for k, feature_map_prediction in enumerate(prediction):
                # create anchors of this feature_map_prediction layer

                # Even entries are regression maps; build anchors from them.
                if (k % 2) == 0:
                    anchors.append(
                        np.array(
                            anchors_of_feature_map(
                                Config.ANCHOR_STRIDE[k // 2],
                                Config.ANCHOR_SIZE[k // 2],
                                feature_map_prediction.size()[1:])))

                # Flatten each map to (num_anchors, channels).
                prediction[k] = feature_map_prediction \
                    .view(feature_map_prediction.size()[0], -1) \
                    .permute(1, 0).contiguous()

            # Even entries: box regression; odd entries: class scores.
            reg_preds = torch.cat(prediction[::2])
            cls_preds = torch.cat(prediction[1::2])

            anchors = torch.tensor(np.vstack(anchors))

            result.append(
                self.convert_predictions(
                    torch.cat((reg_preds, cls_preds), dim=1),
                    batched_data[2][i], anchors))

        return result

    def infer(self, image):
        """Detect objects in a single image given its file path."""
        image = cv2.imread(image)
        # Subtract fixed per-channel mean pixel values (cv2 loads BGR).
        image = image - np.array([104, 117, 123], dtype=np.uint8)

        # HWC -> 1xCxHxW float tensor.
        _input = torch.tensor(image).permute(2, 0, 1).float() \
            .to(device).unsqueeze(0)

        predictions = self.model(_input)
        # flatten predictions
        reg_preds = []
        cls_preds = []
        anchors = []
        for index, prediction in enumerate(predictions):
            # Even outputs are regression maps; build their anchors.
            if (index % 2) == 0:
                anchors.append(
                    np.array(
                        anchors_of_feature_map(
                            Config.ANCHOR_STRIDE[index // 2],
                            Config.ANCHOR_SIZE[index // 2],
                            prediction.size()[2:])))

            # Flatten each map to (num_anchors, channels).
            predictions[index] = prediction.squeeze().view(
                prediction.size()[1], -1).permute(1, 0)

        anchors = torch.tensor(np.vstack(anchors))
        reg_preds = torch.cat(predictions[::2])
        cls_preds = torch.cat(predictions[1::2])

        return self.convert_predictions(
            torch.cat((reg_preds, cls_preds), dim=1), None, anchors)
    torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
])
trainloader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder(
    train_dir, transform=transform_train),
                                          batch_size=64,
                                          shuffle=True)
testloader = torch.utils.data.DataLoader(torchvision.datasets.ImageFolder(
    test_dir, transform=transform_test),
                                         batch_size=64,
                                         shuffle=True)
# ImageFolder: one class per sub-directory of train_dir.
num_classes = len(trainloader.dataset.classes)

# net definition
start_epoch = 0
net = Net(num_classes=num_classes)
if args.resume:
    assert os.path.isfile(
        "./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
    print('Loading from checkpoint/ckpt.t7')
    checkpoint = torch.load("./checkpoint/ckpt.t7")
    # import ipdb; ipdb.set_trace()
    net_dict = checkpoint['net_dict']
    # Rename the final classifier weights so they do not overwrite the new
    # head (whose size presumably depends on num_classes -- confirm); with
    # strict=False the unexpected "old_*" keys are simply ignored.
    for w in ['classifier.4.weight', 'classifier.4.bias']:
        net_dict['old_' + w] = net_dict.pop(w)
    net.load_state_dict(net_dict, strict=False)
    #best_acc = checkpoint['acc']
    #start_epoch = checkpoint['epoch']
net.to(device)

# loss and optimizer
Beispiel #26
0
                                           batch_size=batch_size,
                                           num_workers=4,
                                           shuffle=True,
                                           pin_memory=True)

# Validation loader; shuffle=False keeps evaluation order deterministic.
val_loader = torch.utils.data.DataLoader(QAdataset(
    para_path='./val_para.csv',
    ques_path='./val_ques.csv',
    label_path='./val_label.csv',
    test_mode=False),
                                         batch_size=batch_size,
                                         num_workers=4,
                                         shuffle=False,
                                         pin_memory=True)

model = Net(input_sizes, hidden_sizes, num_class).to(device)
# BCELoss: the model output is expected to be probabilities in [0, 1]
# (e.g. after a sigmoid) -- confirm against the Net implementation.
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Number of batches per epoch (used for progress reporting).
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (x, h, y) in enumerate(train_loader):
        x = x.to(device)
        h = h.to(device)
        y = y.to(device)
        # Forward pass
        outputs = model(x, h)
        loss = criterion(outputs, y)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
Beispiel #27
0
                                **kwargs)
val_loader = utils.DataLoader(val_dataset,
                              batch_size=args.test_batch_size,
                              shuffle=True,
                              **kwargs)
test_loader = utils.DataLoader(test_dataset,
                               batch_size=args.test_batch_size,
                               shuffle=True,
                               **kwargs)

# One 28x28 one-hot "blob" per pixel: blobs[p] is all zeros except a 1 at
# the pixel with flat index p.
blobs = np.zeros((28 * 28, 28, 28))
for i in range(28):
    for j in range(28):
        blobs[i * 28 + j, i, j] = 1

model = Net().to(device)

# Adam with its default learning rate plus L2 weight decay.
optimizer = optim.Adam(model.parameters(), weight_decay=0.001)


def train(args,
          model,
          device,
          train_loader,
          optimizer,
          epoch,
          regularizer_rate,
          until_batch=-1):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if until_batch != -1 and batch_idx > until_batch:
Beispiel #28
0
def main(args):
    """Train Net on the CIFAR10_4x dataset and checkpoint the best model.

    Loads the train/valid splits, runs Adam-optimized cross-entropy training,
    prints a running-average loss every ``args.log_interval`` batches, and every
    ``args.save_interval`` epochs saves a numbered checkpoint plus the
    best-validation-accuracy model under ``args.model_dir``.
    """
    bsz = args.batch_size
    use_gpu = torch.cuda.is_available() and not args.disable_cuda
    device = torch.device("cuda" if use_gpu else "cpu")

    # Per-channel mean/std normalization (constants expressed in [0, 1]).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([125 / 255, 124 / 255, 115 / 255],
                             [60 / 255, 59 / 255, 64 / 255]),
    ])

    train_data = CIFAR10_4x(root=os.path.join(base_dir, 'data'),
                            split="train",
                            transform=transform)
    train_iter = torch.utils.data.DataLoader(train_data,
                                             batch_size=bsz,
                                             shuffle=True,
                                             num_workers=args.num_workers)

    valid_data = CIFAR10_4x(root=os.path.join(base_dir, 'data'),
                            split='valid',
                            transform=transform)
    valid_iter = torch.utils.data.DataLoader(valid_data,
                                             batch_size=bsz,
                                             shuffle=False,
                                             num_workers=args.num_workers)

    net = Net()
    trainable = sum(p.nelement() for p in net.parameters() if p.requires_grad)
    total = sum(p.nelement() for p in net.parameters())
    print("number of trained parameters: %d" % (trainable))
    print("number of total parameters: %d" % (total))

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    net.to(device)
    best_acc = 0
    for epoch in range(args.num_epoch):

        # Running average over the most recent log_interval batches.
        recent_losses = deque([], maxlen=args.log_interval)
        for step, batch in enumerate(train_iter):
            inputs = batch[0].to(device)
            labels = batch[1].to(device)

            optimizer.zero_grad()
            loss = criterion(net(inputs), labels)
            loss.backward()
            optimizer.step()

            recent_losses.append(loss.item())
            if (step + 1) % args.log_interval == 0:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, step + 1,
                       sum(recent_losses) / len(recent_losses)))

        if (epoch + 1) % args.save_interval == 0:
            acc = evaluation(net, valid_iter, device)
            torch.save(
                net,
                os.path.join(args.model_dir,
                             'cifar10_4x_{}.pth'.format(epoch + 1)))
            if acc > best_acc:
                torch.save(net,
                           os.path.join(args.model_dir, 'cifar10_4x_best.pth'))
                best_acc = acc
            # evaluation() presumably flips the net to eval mode; restore
            # training mode before the next epoch -- TODO confirm.
            net.train()
Beispiel #29
0
import torch
import cv2
import torch.nn as nn
from torchvision.transforms import transforms
from loader import MyDataset
from model import Net, convrelu
import os
import numpy as np

NUM_CLASSES = 14
model = Net(NUM_CLASSES,0.2)

def load_my_state_dict(model, state_dict):
    """Partially load ``state_dict`` into ``model``, skipping incompatible entries.

    Unlike ``model.load_state_dict(...)``, this never raises on extra keys:
    entries whose name is missing from the model are reported and skipped.
    Entries whose tensor shape does not match are likewise skipped with a
    warning (the original code would crash inside ``copy_`` with an
    uninformative size-mismatch error). Copies run under ``torch.no_grad()``
    so no autograd history is recorded.

    Args:
        model: target ``nn.Module`` whose parameters/buffers are updated in place.
        state_dict: mapping of parameter names to tensors (e.g. from ``torch.load``).

    Returns:
        The same ``model`` instance, for call-chaining convenience.
    """
    own_state = model.state_dict()

    with torch.no_grad():
        for name, param in state_dict.items():
            if name not in own_state:
                print("[weight not copied for %s]" % (name))
                continue
            # Guard against shape mismatches instead of crashing mid-copy.
            if own_state[name].shape != param.shape:
                print("[shape mismatch for %s: %s vs %s, not copied]" %
                      (name, tuple(own_state[name].shape), tuple(param.shape)))
                continue
            own_state[name].copy_(param)
    return model

# Restore trained weights via the tolerant partial loader defined above.
model = load_my_state_dict(model,torch.load('/home/ravi/Ravi_D/Acadamic/UPENN/Fourth_sem/F1_10/model_best_200_balanced.pth'))

print('loaded model')

# Input directory of test images and the directory where results are written.
# NOTE(review): hard-coded absolute paths -- consider CLI arguments.
test_folder = '/home/ravi/Ravi_D/Acadamic/UPENN/Fourth_sem/F1_10/test_images/' 
test_images = os.listdir(test_folder)
print(len(test_images))
output_dir = '/home/ravi/Ravi_D/Acadamic/UPENN/Fourth_sem/F1_10/save_test/' 
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import argparse
from model import Net

# This file loads an already-trained white-box model, used to demonstrate the original model's performance
# Instantiate the network and restore its trained weights from disk.
model = Net()
model.load_state_dict(torch.load("fashion_mnist_cnn.pt"))

def test(args, model, device, test_loader):
    """Evaluate ``model`` on ``test_loader``: average NLL loss and accuracy.

    NOTE(review): ``args`` is unused in the visible portion, and the final
    print statement is truncated in this view.
    """
    model.eval()
    test_loss = 0
    correct = 0
    # No gradients needed for evaluation.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            # print(data.shape)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item(
            )  # sum up batch loss: total over all samples, averaged only at the end
            pred = output.argmax(
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    # Convert the summed loss into a per-sample average.
    test_loss /= len(test_loader.dataset)

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
Beispiel #31
0
# Fail fast if --cuda was requested but no GPU is available.
cuda = opt.cuda
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

# Seed CPU (and, when enabled, GPU) RNGs for reproducibility.
torch.manual_seed(opt.seed)
if cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
# Super-resolution network trained with pixel-wise MSE against the HR target.
model = Net(upscale_factor=opt.upscale_factor)
criterion = nn.MSELoss()

if cuda:
    model = model.cuda()
    criterion = criterion.cuda()

optimizer = optim.Adam(model.parameters(), lr=opt.lr)


def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = Variable(batch[0]), Variable(batch[1])
        if cuda:
            input = input.cuda()
Beispiel #32
0
if __name__ == "__main__":
    # CLI for batch-upscaling test videos with a trained SR model.
    parser = argparse.ArgumentParser(description='Test Super Resolution')
    parser.add_argument('--upscale_factor', default=3, type=int, help='super resolution upscale factor')
    # NOTE(review): type=bool is an argparse pitfall -- bool('False') is True,
    # so ANY non-empty value enables real-time mode; action='store_true' is
    # the conventional fix (would change the CLI, so flagged only).
    parser.add_argument('--is_real_time', default=False, type=bool, help='super resolution real time to show')
    parser.add_argument('--delay_time', default=1, type=int, help='super resolution delay time to show')
    parser.add_argument('--model_name', default='epoch_3_100.pt', type=str, help='super resolution model name')
    opt = parser.parse_args()

    UPSCALE_FACTOR = opt.upscale_factor
    IS_REAL_TIME = opt.is_real_time
    DELAY_TIME = opt.delay_time
    MODEL_NAME = opt.model_name

    # Collect every video file under the SRF_<factor> test directory.
    path = 'data/test/SRF_' + str(UPSCALE_FACTOR) + '/video/'
    videos_name = [x for x in listdir(path) if is_video_file(x)]
    model = Net(upscale_factor=UPSCALE_FACTOR)
    if torch.cuda.is_available():
        model = model.cuda()
    # for cpu
    # model.load_state_dict(torch.load('epochs/' + MODEL_NAME, map_location=lambda storage, loc: storage))
    model.load_state_dict(torch.load('epochs/' + MODEL_NAME))

    out_path = 'results/SRF_' + str(UPSCALE_FACTOR) + '/'
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for video_name in tqdm(videos_name, desc='convert LR videos to HR videos'):
        videoCapture = cv2.VideoCapture(path + video_name)
        if not IS_REAL_TIME:
            fps = videoCapture.get(cv2.CAP_PROP_FPS)
            # NOTE(review): width multiplies inside int() but height outside --
            # both yield ints here, yet the asymmetry looks unintentional.
            size = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH) * UPSCALE_FACTOR),
                    int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) * UPSCALE_FACTOR)
Beispiel #33
0
# Guard: requesting --cuda without an available GPU is a hard error.
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

# Seed the RNG for reproducibility.
torch.manual_seed(opt.seed)

device = torch.device("cuda" if opt.cuda else "cpu")

print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

print('===> Building model')
# Super-resolution network trained with pixel-wise MSE on the chosen device.
model = Net(upscale_factor=opt.upscale_factor).to(device)
criterion = nn.MSELoss()

optimizer = optim.Adam(model.parameters(), lr=opt.lr)


def train(epoch):
    """Run one epoch over training_data_loader, accumulating the MSE loss.

    NOTE(review): the body appears truncated in this view -- no per-epoch
    summary/print is visible after the loop.
    """
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = batch[0].to(device), batch[1].to(device)

        optimizer.zero_grad()
        loss = criterion(model(input), target)
        epoch_loss += loss.item()
        loss.backward()
        optimizer.step()
    text_retrieval_val = text_retrieval
    vid_retrieval_val = vid_retrieval
    flow_retrieval_val = flow_retrieval
    face_retrieval_val = face_retrieval
    audio_retrieval_val = audio_retrieval


    face_ind_test = np.load(os.path.join(root_feat,'no_face_ind_retrieval.npy'))
    face_ind_test = 1 - face_ind_test
# NOTE(review): Python 2 print statement -- this fragment targets Python 2.
print 'Done.'

# Model
# Per-modality feature dimensions: name -> (input_dim, projection_dim).
video_modality_dim = {'face': (128,128), 'audio': (128*16,128),
'visual': (2048,2048), 'motion': (1024,1024)}
net = Net(video_modality_dim,300,
        audio_cluster=16,text_cluster=args.text_cluster_size)
net.train()

if args.GPU:
    net.cuda()

# Optimizers + Loss
# Max-margin ranking loss for cross-modal retrieval.
max_margin = MaxMarginRankingLoss(margin=args.margin) 


if args.optimizer == 'adam':
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
elif args.optimizer == 'sgd':
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum)

if args.GPU: