예제 #1
0
def main():
    """
    Main function wrapper for demo script.

    Seeds all RNG sources for reproducibility, loads trained weights (if a
    weights file was specified) onto the available device, then runs the
    model over every file under the demo directory, printing one prediction
    per file.
    """

    # Seed every RNG source so the demo is reproducible.
    random.seed(args["SEED"])
    np.random.seed(args["SEED"])
    torch.manual_seed(args["SEED"])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if args["TRAINED_WEIGHTS_FILE"] is None:
        print("Path to trained weights file not specified.")
        return

    print("Trained Weights File: %s" % (args["TRAINED_WEIGHTS_FILE"]))
    print("Demo Directory: %s" % (args["DEMO_DIRECTORY"]))

    model = MyNet()
    model.load_state_dict(
        torch.load(
            args["CODE_DIRECTORY"] + args["TRAINED_WEIGHTS_FILE"],
            map_location=device,
        ))
    model.to(device)
    # eval() is loop-invariant; set inference mode once instead of per file.
    model.eval()

    print("Running Demo ....")

    for root, dirs, files in os.walk(args["DEMO_DIRECTORY"]):
        for file in files:
            sampleFile = os.path.join(root, file)
            preprocess_sample(sampleFile)

            inp, _ = prepare_input(sampleFile)
            inputBatch = torch.unsqueeze(inp, dim=0)  # add batch dimension
            inputBatch = (inputBatch.float()).to(device)

            with torch.no_grad():
                outputBatch = model(inputBatch)

            predictionBatch = decode(outputBatch)
            pred = predictionBatch[0][:]

            print("File: %s" % (file))
            print("Prediction: %s" % (pred))
            print("\n")

    print("Demo Completed.")

    return
예제 #2
0
    def __init__(self, hparams):
        """
        Build the model from hyperparameters.

        :param hparams: hyperparameter namespace; reads at least
            `batch_size` and `h_dim` here (also consumed by
            `_create_dataset`).
        """
        super(MyModel, self).__init__()
        self.hparams = hparams
        self.batch_size = hparams.batch_size

        # Expected to set self.dx_voc / self.rx_voc, used below to size the
        # network input.
        self._create_dataset()
        # Input size is both vocabularies plus one extra slot (presumably a
        # padding/special token -- TODO confirm against MyNet).
        self.model = MyNet(
            len(self.dx_voc.word2idx) + len(self.rx_voc.word2idx) + 1,
            hparams.h_dim)
예제 #3
0
    def __init__(self, hparams):
        """
        Initialize the wrapper and construct the underlying network.

        :param hparams: hyperparameter namespace; reads `batch_size`,
            `h_dim`, `n_layers`, `only_rnn`, and `is_multi`.
        """
        super(MyModel, self).__init__()
        self.hparams = hparams
        self.batch_size = hparams.batch_size

        self._create_dataset()

        # Input layer covers both vocabularies (dx + rx).
        vocab_size = len(self.dx_voc.word2idx) + len(self.rx_voc.word2idx)
        self.model = MyNet(vocab_size,
                           hparams.h_dim,
                           n_layers=hparams.n_layers,
                           only_rnn=hparams.only_rnn,
                           is_multi=hparams.is_multi)
예제 #4
0
    def load_model(self):
        """
        Load the trained O-Net weights and run landmark prediction on every
        image in `self.image_dir`, saving each predicted landmark set via
        `self.save_pred`.

        :raises ValueError: if `self.o_net_path` is not set -- there would
            be no network to run (the original code hit a NameError on
            `net` later instead).
        """
        # Step 1: load the model. Fail fast when no weights path is given.
        use_cuda = self.use_cuda
        if self.o_net_path is None:
            raise ValueError(
                "o_net_path is not set; cannot load O-Net weights")

        print('=======> loading')
        net = MyNet(use_cuda=False)
        net.load_state_dict(torch.load(self.o_net_path))
        if use_cuda:
            # The network was built for CPU inference (use_cuda=False), so it
            # is kept on the CPU even when CUDA is available -- this matches
            # the original behaviour; confirm if GPU inference was intended.
            net.to('cpu')
        net.eval()

        # Step 2: iterate over the images and predict landmarks.
        img_list = os.listdir(self.image_dir)
        for idx, item in enumerate(img_list):
            _img = Image.open(os.path.join(self.image_dir, item))
            parse_result = self.parse_image_name(item)
            landmark_and_format = parse_result['landmark_and_format']
            name = parse_result['name']
            img = self.transforms(_img)
            img = img.unsqueeze(0)  # add batch dimension

            pred = net(img)
            # Scale predictions by 192 -- presumably normalized coordinates
            # mapped back to a 192x192 image; verify against training code.
            pred = pred * 192

            print('the pred landmark is :', pred)
            print("=" * 20)

            try:
                self.save_pred(_img, name, landmark_and_format,
                               pred.detach().numpy())
            except Exception:
                # Best-effort semantics: report the failing file, keep going.
                print('Error:', item)
예제 #5
0
def main():
    """
    Main function wrapper for testing script.
    """

    # Seed every RNG source so evaluation is reproducible.
    random.seed(args["SEED"])
    np.random.seed(args["SEED"])
    torch.manual_seed(args["SEED"])
    if torch.cuda.is_available():
        device = torch.device("cuda")
        loaderKwargs = {"num_workers": args["NUM_WORKERS"], "pin_memory": True}
    else:
        device = torch.device("cpu")
        loaderKwargs = {}

    if args["TRAINED_WEIGHTS_FILE"] is None:
        print("Path to the trained weights file not specified.")
        return

    # Build the test set loader.
    dataset = MyDataset("test", datadir=args["DATA_DIRECTORY"])
    loader = DataLoader(dataset,
                        batch_size=args["BATCH_SIZE"],
                        shuffle=True,
                        **loaderKwargs)

    print("Trained Weights File: %s" % (args["TRAINED_WEIGHTS_FILE"]))

    # Restore the trained model onto the chosen device.
    model = MyNet()
    weightsPath = args["CODE_DIRECTORY"] + args["TRAINED_WEIGHTS_FILE"]
    model.load_state_dict(torch.load(weightsPath, map_location=device))
    model.to(device)

    criterion = MyLoss()
    regularizer = L2Regularizer(lambd=args["LAMBDA"])

    print("Testing the trained model ....")

    testLoss, testMetric = evaluate(model, loader, criterion,
                                    regularizer, device)

    print("| Test Loss: %.6f || Test Metric: %.3f |" %
          (testLoss, testMetric))
    print("Testing Done.")

    return
예제 #6
0
def main():
    """
    Train the denoising network: add Gaussian noise to clean training
    images, minimize L1 loss between the network output and the clean
    image, and once per epoch validate with PSNR and checkpoint the model.

    Relies on module-level `opt`, `Dataset`, `MyNet`, `batch_PSNR`, etc.
    """
    # Load dataset
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    dataset_val = Dataset(train=False)
    loader_train = DataLoader(dataset=dataset_train,
                              num_workers=4,
                              batch_size=opt.batchSize,
                              shuffle=True)
    print("# of training samples: %d\n" % int(len(dataset_train)))
    # Build model
    # net = DnCNN(channels=1, num_of_layers=opt.num_of_layers)
    net = MyNet()
    # net.apply(weights_init_kaiming)
    s = MS_SSIM(data_range=1., channel=1)  # NOTE(review): never used below
    criterion = nn.L1Loss()
    # Move to GPU
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    criterion.cuda()
    summary(model, (1, 96, 96))
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    writer = SummaryWriter(opt.outf)
    step = 0
    noiseL_B = [0, 55]  # ignored when opt.mode=='S'
    for epoch in range(opt.epochs):
        # Step decay: halve the learning rate every `opt.step` epochs.
        current_lr = opt.lr * (0.5**(epoch // opt.step))
        # set learning rate
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data
            # Mode 'S': one fixed noise level (opt.noiseL) for the batch.
            if opt.mode == 'S':
                noise = torch.FloatTensor(img_train.size()).normal_(
                    mean=0, std=opt.noiseL / 255.)
            # Mode 'B': blind denoising -- a random noise level per sample,
            # drawn uniformly from noiseL_B.
            if opt.mode == 'B':
                noise = torch.zeros(img_train.size())
                stdN = np.random.uniform(noiseL_B[0],
                                         noiseL_B[1],
                                         size=noise.size()[0])
                for n in range(noise.size()[0]):
                    sizeN = noise[0, :, :, :].size()
                    noise[n, :, :, :] = torch.FloatTensor(sizeN).normal_(
                        mean=0, std=stdN[n] / 255.)
            imgn_train = img_train + noise
            img_train, imgn_train = Variable(img_train.cuda()), Variable(
                imgn_train.cuda())
            noise = Variable(noise.cuda())
            out_train = model(imgn_train)
            # loss = criterion(out_train, noise) / (imgn_train.size()[0]*2)
            # L1 loss between the denoised output and the clean image.
            loss = criterion(out_train, img_train)
            loss.backward()
            # clip = 0.01 / current_lr
            # nn.utils.clip_grad_norm(model.parameters(), clip)
            optimizer.step()
            # results
            model.eval()
            # out_train = torch.clamp(imgn_train-model(imgn_train), 0., 1.)
            out_train = torch.clamp(model(imgn_train), 0., 1.)
            psnr_train = batch_PSNR(out_train, img_train, 1.)
            '''
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            '''
            # if you are using older version of PyTorch, you may need to change loss.item() to loss.data[0]
            # `len(loader_train) // 1` == len(loader_train), and i runs from
            # 0 to len-1, so this branch fires only at i == 0 -- i.e. the
            # validation/checkpoint pass happens once per epoch, at its start.
            if i % int(len(loader_train) // 1) == 0:
                # the end of each epoch
                model.eval()
                # validate
                psnr_val = 0
                for k in range(len(dataset_val)):
                    img_val = torch.unsqueeze(dataset_val[k], 0)
                    noise = torch.FloatTensor(img_val.size()).normal_(
                        mean=0, std=opt.val_noiseL / 255.)
                    imgn_val = img_val + noise
                    with torch.no_grad():
                        img_val, imgn_val = Variable(img_val.cuda()), Variable(
                            imgn_val.cuda())
                        # out_val = torch.clamp(imgn_val-model(imgn_val), 0., 1.)
                        out_val = torch.clamp(model(imgn_val), 0., 1.)
                    psnr_val += batch_PSNR(out_val, img_val, 1.)
                psnr_val /= len(dataset_val)
                # print("\n[epoch %d] PSNR_val: %.4f" % (epoch+1, psnr_val))
                print("[epoch %d][%d/%d] loss: %.4f PSNR_val: %.4f" %
                      (epoch + 1, i + 1, len(loader_train), loss.item(),
                       psnr_val))
                torch.save(
                    model.state_dict(),
                    os.path.join(opt.outf,
                                 'net_' + str(round(psnr_val, 4)) + '.pth'))
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
        # NOTE(review): dangling triple-quote below -- this snippet appears
        # truncated; the original presumably opened a commented-out block
        # here that is closed past the visible end.
        '''
예제 #7
0
def _save_curve_plot(trainCurve, valCurve, title, ylabel, savePath):
    """
    Plot training (blue) and validation (red) curves on one figure and
    save it as a PNG.

    :param trainCurve: per-epoch values for the training set
    :param valCurve: per-epoch values for the validation set
    :param title: figure title
    :param ylabel: y-axis label
    :param savePath: output PNG path
    """
    plt.figure()
    plt.title(title)
    plt.xlabel("Epoch No.")
    plt.ylabel(ylabel)
    plt.plot(
        list(range(1, len(trainCurve) + 1)),
        trainCurve,
        "blue",
        label="Train",
    )
    plt.plot(
        list(range(1, len(valCurve) + 1)),
        valCurve,
        "red",
        label="Validation",
    )
    plt.legend()
    plt.savefig(savePath)
    plt.close()


def main():
    """
    Main function wrapper for training script.

    Seeds all RNGs, builds a random train/validation split, trains the
    model for NUM_EPOCHS epochs, and every SAVE_FREQUENCY epochs
    checkpoints the weights and saves loss/metric curve plots under
    CODE_DIRECTORY/checkpoints.
    """

    matplotlib.use("Agg")  # headless backend: figures are only written to disk
    random.seed(args["SEED"])
    np.random.seed(args["SEED"])
    torch.manual_seed(args["SEED"])
    if torch.cuda.is_available():
        device = torch.device("cuda")
        kwargs = {"num_workers": args["NUM_WORKERS"], "pin_memory": True}
    else:
        device = torch.device("cpu")
        kwargs = {}

    # Random train/validation split of the training data.
    trainData = MyDataset("train", datadir=args["DATA_DIRECTORY"])
    valSize = int(args["VALIDATION_SPLIT"] * len(trainData))
    trainSize = len(trainData) - valSize
    trainData, valData = random_split(trainData, [trainSize, valSize])
    trainLoader = DataLoader(
        trainData, batch_size=args["BATCH_SIZE"], shuffle=True, **kwargs
    )
    valLoader = DataLoader(
        valData, batch_size=args["BATCH_SIZE"], shuffle=True, **kwargs
    )

    model = MyNet()
    model.to(device)
    optimizer = optim.Adam(
        model.parameters(),
        lr=args["LEARNING_RATE"],
        betas=(args["MOMENTUM1"], args["MOMENTUM2"]),
    )
    scheduler = optim.lr_scheduler.ExponentialLR(
        optimizer, gamma=args["LR_DECAY"]
    )
    criterion = MyLoss()
    regularizer = L2Regularizer(lambd=args["LAMBDA"])

    # Refuse to clobber an existing checkpoints directory without consent.
    if os.path.exists(args["CODE_DIRECTORY"] + "/checkpoints"):
        while True:
            char = input(
                "Continue and remove the 'checkpoints' directory? y/n: "
            )
            if char == "y":
                break
            elif char == "n":
                sys.exit()
            else:
                print("Invalid input")
        shutil.rmtree(args["CODE_DIRECTORY"] + "/checkpoints")

    os.mkdir(args["CODE_DIRECTORY"] + "/checkpoints")
    os.mkdir(args["CODE_DIRECTORY"] + "/checkpoints/plots")
    os.mkdir(args["CODE_DIRECTORY"] + "/checkpoints/weights")

    # Optionally warm-start from pretrained weights.
    if args["PRETRAINED_WEIGHTS_FILE"] is not None:
        print(
            "Pretrained Weights File: %s" % (args["PRETRAINED_WEIGHTS_FILE"])
        )
        print("Loading the pretrained weights ....")
        model.load_state_dict(
            torch.load(
                args["CODE_DIRECTORY"] + args["PRETRAINED_WEIGHTS_FILE"],
                map_location=device,
            )
        )
        model.to(device)
        print("Loading Done.")

    trainingLossCurve = list()
    validationLossCurve = list()
    trainingMetricCurve = list()
    validationMetricCurve = list()

    numTotalParams, numTrainableParams = num_params(model)
    print("Number of total parameters in the model = %d" % (numTotalParams))
    print(
        "Number of trainable parameters in the model = %d"
        % (numTrainableParams)
    )

    print("Training the model ....")

    for epoch in range(1, args["NUM_EPOCHS"] + 1):

        trainingLoss, trainingMetric = train(
            model, trainLoader, optimizer, criterion, regularizer, device
        )
        trainingLossCurve.append(trainingLoss)
        trainingMetricCurve.append(trainingMetric)

        validationLoss, validationMetric = evaluate(
            model, valLoader, criterion, regularizer, device
        )
        validationLossCurve.append(validationLoss)
        validationMetricCurve.append(validationMetric)

        print(
            (
                "| Epoch: %03d |"
                "| Tr.Loss: %.6f  Val.Loss: %.6f |"
                "| Tr.Metric: %.3f  Val.Metric: %.3f |"
            )
            % (
                epoch,
                trainingLoss, validationLoss,
                trainingMetric, validationMetric,
            )
        )

        scheduler.step()

        if epoch % args["SAVE_FREQUENCY"] == 0:
            # Checkpoint the weights, tagged with epoch and validation metric.
            savePath = (
                args["CODE_DIRECTORY"]
                + "/checkpoints/weights/epoch_{:04d}-metric_{:.3f}.pt"
            ).format(epoch, validationMetric)
            torch.save(model.state_dict(), savePath)

            # Loss and metric figures are identical in shape; delegate to
            # the shared helper instead of duplicating the plotting code.
            _save_curve_plot(
                trainingLossCurve,
                validationLossCurve,
                "Loss Curves",
                "Loss value",
                (
                    args["CODE_DIRECTORY"]
                    + "/checkpoints/plots/epoch_{:04d}_loss.png"
                ).format(epoch),
            )
            _save_curve_plot(
                trainingMetricCurve,
                validationMetricCurve,
                "Metric Curves",
                "Metric",
                (
                    args["CODE_DIRECTORY"]
                    + "/checkpoints/plots/epoch_{:04d}_metric.png"
                ).format(epoch),
            )

    print("Training Done.")

    return
예제 #8
0
파일: train.py 프로젝트: daben233-bit/SNdet
def train(**kwargs):
    """
    Train MyNet on the SN dataset.

    :param kwargs: configuration overrides applied to the global `cfg`
        via `cfg._parse`.
    """
    # 1. configure model
    cfg._parse(kwargs)
    model = MyNet()
    if cfg.load_model_path:
        model.load_state_dict(torch.load(cfg.load_model_path))

    if cfg.multi_gpu:
        model = parallel.DataParallel(model)

    if cfg.use_gpu:
        model.cuda()

    # 2. prepare data
    train_data = SN(root=cfg.train_data_root, crop_size=cfg.crop_size)
    train_loader = DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True)

    # 3. criterion (already imported) and optimizer
    lr = cfg.lr
    # optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=cfg.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=cfg.momentum)

    # 4. meters
    loss_meter = meter.AverageValueMeter()
    previous_loss = 1e10

    # train
    for epoch in range(cfg.max_epoch):
        print('epoch %s: ===========================' % epoch)
        loss_meter.reset()

        for ii, (data, label_group) in tqdm(enumerate(train_loader)):
            # train model
            if cfg.use_gpu:
                data = data.cuda()
                label_group = [label.cuda() for label in label_group]
            data = Variable(data).float()
            label_group = [Variable(label) for label in label_group]

            optimizer.zero_grad()
            score = model(data)
            loss = criterion(score, label_group, batch_size=cfg.batch_size,
                             neg_pos_ratio=cfg.neg_pos_ratio)
            loss.backward()
            optimizer.step()

            # meters update and print
            loss_meter.add(loss.item())
            if (ii + 1) % cfg.print_freq == 0:
                print(loss_meter.value()[0])

        if (epoch + 1) % cfg.save_freq == 0:
            # DataParallel wraps the real network in `.module`; unwrap only
            # when wrapped, so single-GPU runs (cfg.multi_gpu=False) do not
            # crash on the missing attribute.
            state = (model.module.state_dict() if hasattr(model, 'module')
                     else model.state_dict())
            torch.save(state, './checkpoints/last.pth')

        # Decay the learning rate in place on the optimizer (preserves
        # momentum buffers) whenever the epoch loss stopped improving.
        if loss_meter.value()[0] > previous_loss:
            lr = lr * cfg.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        previous_loss = loss_meter.value()[0]
예제 #9
0
def visualize_net():
    """Export the MyNet computation graph to TensorBoard using a random
    dummy input of shape (1, 3, 768, 1024)."""
    dummy_input = torch.rand((1, 3, 768, 1024))
    net = MyNet()
    with SummaryWriter(comment='MyNet') as writer:
        writer.add_graph(net, (dummy_input, ))
예제 #10
0
파일: main.py 프로젝트: JohnFinn/gas
G = tg.utils.to_networkx(graph_dataset[0])
# G = nx.relabel_nodes(G, graph_dataset.dataset_.country_by_idx)
nx.draw(G,
        pos=graph_dataset.dataset_.location_getter(),
        labels=dataset.country_by_idx,
        with_labels=True)
plt.show()

pca = PCA(n_components=16)
data = dataset.df[[
    c for c in dataset.df.columns if isinstance(c, dt.datetime)
]].astype(float).T
pca.fit(data)

animator = AnimPlot('train loss', 'test loss')
my_net = MyNet()

optimizer = torch.optim.Adam(my_net.parameters(), lr=0.0001)

train_loader = torch.utils.data.DataLoader(train, 64)
test_loader = torch.utils.data.DataLoader(test, 10)

for epoch_no in range(10000):

    with torch.no_grad():
        test_loss = 0.0
        for X, target in test_loader:

            transformed = torch.tensor(pca.transform(X), dtype=torch.float32)

            predicted = my_net(transformed)