Example no. 1
import torch
from PIL.Image import BILINEAR
from torchvision.transforms import (CenterCrop, Compose, Normalize, Resize,
                                    ToTensor)


def main():
    # GPU setup, disabled in the original snippet:
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # model = MyModel()
    # model.cuda()
    # cudnn.benchmark = True

    # `args` is assumed to come from an argparse parser defined elsewhere.
    train_loader = torch.utils.data.DataLoader(
        MyDataset(
            args.train_path,
            img_transform=Compose([
                Resize(256, interpolation=BILINEAR),
                CenterCrop(224),
                ToTensor(),
                Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        a = 0  # placeholder loop body from the original snippet
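None of these snippets define MyDataset itself. For this example, where it is constructed as MyDataset(args.train_path, img_transform=...), a minimal sketch might be a file-list dataset; the "path label" line format below is an assumption, not the original implementation:

from PIL import Image
from torch.utils.data import Dataset

class MyDataset(Dataset):
    """Hypothetical image-list dataset matching the usage above."""
    def __init__(self, list_path, img_transform=None):
        # Each line of the list file: "<image_path> <label>" (assumed format)
        with open(list_path) as f:
            self.samples = [line.split() for line in f if line.strip()]
        self.img_transform = img_transform

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, label = self.samples[idx]
        img = Image.open(path).convert('RGB')
        if self.img_transform is not None:
            img = self.img_transform(img)
        return img, int(label)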
Example no. 2
def get_dataset(name, split='train', transform=None,
                target_transform=None, download=True):
    train = (split == 'train')
    if name == 'cifar10':
        # return datasets.CIFAR10(root=_dataset_path['cifar10'],
        #                         train=train,
        #                         transform=transform,
        #                         target_transform=target_transform,
        #                         download=download)
        return MyDataset(root=_dataset_path['cifar10'],
                         train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
    elif name == 'cifar100':
        return datasets.CIFAR100(root=_dataset_path['cifar100'],
                                 train=train,
                                 transform=transform,
                                 target_transform=target_transform,
                                 download=download)
    elif name == 'imagenet':
        path = _dataset_path[name][split]
        return datasets.ImageFolder(root=path,
                                    transform=transform,
                                    target_transform=target_transform)
Example no. 3

    # __init__ of a training harness class (the class definition is not shown)
    def __init__(self):
        self.config = GlobalConfig()
        os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
        print("torch.distributed.is_available: " +
              str(torch.distributed.is_available()))
        print("Device Count: {0}".format(torch.cuda.device_count()))

        transform = transforms.Compose([
            transforms.Resize(self.config.Width),
            transforms.RandomCrop(self.config.Width),
            transforms.ToTensor(),
            transforms.Normalize(mean=self.config.mean, std=self.config.std)
        ])
        # Creates training set
        self.train_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(self.config.TRAIN_PATH, transform),
            batch_size=self.config.train_batch_size,
            num_workers=4,
            pin_memory=True,
            shuffle=True,
            drop_last=True)

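        # Note: the loader below re-assigns self.train_loader, so the
        # ImageFolder-based loader created above is never used.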
        self.train_dataset = MyDataset(root='F:\\ILSVRC2012_img_val\\',
                                       filename='./val.txt')
        print(len(self.train_dataset))
        self.train_loader = data.DataLoader(
            dataset=self.train_dataset,
            batch_size=self.config.train_batch_size,
            shuffle=True,
            num_workers=4)

        self.net = HighQualityNet(config=self.config)
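The MyDataset(root, filename) call above again suggests the file-list pattern sketched under Example no. 1: val.txt presumably lists the ILSVRC2012 validation images (paths relative to the root directory) together with their labels.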
Example no. 4
from sklearn.datasets import load_iris


def main():
    iris = load_iris()
    X = iris.data[:10, :]  # first 10 samples, all 4 features

    my_dataset = MyDataset(X)

    for i in range(1, 6):
        print(f'{i}-th check:')
        check(my_dataset)
    
    print('DONE')
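Here MyDataset wraps a plain NumPy array, and check() is some validation helper not shown. A minimal PyTorch-style sketch of the wrapper (the framework choice is an assumption; the snippet never reveals it):

import numpy as np
from torch.utils.data import Dataset

class MyDataset(Dataset):
    """Hypothetical wrapper around a 2-D feature array."""
    def __init__(self, X):
        self.X = np.asarray(X, dtype=np.float32)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx]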
Example no. 5
import argparse

import matplotlib.pyplot as plt
import numpy as np

import chainer
from chainer import cuda, serializers


def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--modelpath', '-m', default='result/mymlp.model',
                        help='Model path to be loaded')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--unit', '-u', type=int, default=50,
                        help='Number of units')
    parser.add_argument('--batchsize', '-b', type=int, default=10,
                        help='Number of images in each mini-batch')
    args = parser.parse_args()

    batchsize = args.batchsize
    # Load the custom dataset
    dataset = MyDataset('data/my_data.csv')
    train_ratio = 0.7
    train_size = int(len(dataset) * train_ratio)
    train, test = chainer.datasets.split_dataset_random(dataset, train_size, seed=13)

    # Load trained model
    model = MyMLP(args.unit)  # type: MyMLP
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU
    xp = np if args.gpu < 0 else cuda.cupy

    serializers.load_npz(args.modelpath, model)

    # Predict
    x_test, y_test, t_test = model.predict2(test)
    print('x', x_test)
    print('y', y_test)
    print('t', t_test)

    plt.figure()
    plt.plot(x_test, t_test, 'o', label='test actual')
    plt.plot(x_test, y_test, 'o', label='test predict')
    plt.legend()
    plt.savefig('predict2.png')
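MyDataset here is a Chainer dataset built from a CSV file; the same class reappears in Example no. 8. A minimal sketch, assuming features in all but the last column and the regression target in the last one (the column layout is an assumption):

import numpy as np
import chainer

class MyDataset(chainer.dataset.DatasetMixin):
    """Hypothetical CSV-backed dataset for regression."""
    def __init__(self, csv_path):
        data = np.loadtxt(csv_path, delimiter=',', dtype=np.float32)
        self.x = data[:, :-1]   # features
        self.t = data[:, -1:]   # target, kept 2-D for chainer loss functions

    def __len__(self):
        return len(self.x)

    def get_example(self, i):
        return self.x[i], self.t[i]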
Example no. 6
    BATCH_SIZE = 1
    NUM_EPOCHS = 200

    ### use for day2night dataset
    # !wget http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/night2day.tar.gz
    # !tar xvzf night2day.tar.gz
    # BATCH_SIZE = 4
    # NUM_EPOCHS = 17

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    set_seed(21)

    ### dataset creation
    train_set = MyDataset(root_dir='facades/train/', transform=my_transforms_tr)
    val_set   = MyDataset(root_dir='facades/val/', transform=my_transforms_val)
    test_set  = MyDataset(root_dir='facades/test/', transform=my_transforms_val)

    ### day2night
    # train_set = MyDataset(root_dir='night2day/train/', transform=my_transforms_tr)
    # val_set   = MyDataset(root_dir='night2day/val/', transform=my_transforms_val)
    # test_set  = MyDataset(root_dir='night2day/test/', transform=my_transforms_val)

    ### dataloaders
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=2, pin_memory=True)

    val_loader = DataLoader(val_set, batch_size=BATCH_SIZE,
                            shuffle=False,
                            num_workers=2, pin_memory=True)
                            # remaining arguments assumed to mirror train_loader
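The facades and night2day archives store each example as a single image with the two domains side by side, so MyDataset presumably loads a file and splits it into an (input, target) pair. A rough sketch under that assumption (the file layout and the behaviour of my_transforms_* are not shown in the snippet):

import os
from PIL import Image
from torch.utils.data import Dataset

class MyDataset(Dataset):
    """Hypothetical paired-image dataset for pix2pix-style training."""
    def __init__(self, root_dir, transform=None):
        self.paths = sorted(os.path.join(root_dir, f)
                            for f in os.listdir(root_dir))
        self.transform = transform

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        img = Image.open(self.paths[idx]).convert('RGB')
        w, h = img.size
        # facades convention: photo on the left half, label map on the right
        a = img.crop((0, 0, w // 2, h))
        b = img.crop((w // 2, 0, w, h))
        if self.transform is not None:
            a, b = self.transform(a), self.transform(b)
        return a, b

A real implementation would apply any random transforms jointly to both halves so the pair stays spatially aligned; applying them independently, as sketched, only works for deterministic transforms.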
Example no. 7
        out = self.dropout(out)

        # Fully connected 1
        out = self.fc1(out)
        return out


import torch.optim as optim
import time

# Loading the data

times = []
ACRS = []

train_dataset = MyDataset(train_features.reshape(7665, 12288), train_labels)

network = Network().double()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(network.parameters(), lr=0.001)

now = time.time()

accuracy_old = 0

listofloss_training = []
ACRS_validation = []
listofloss_validation = []
epochh = []

for epoch in range(100):
    ...  # training-loop body truncated in the original snippet
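MyDataset in this snippet takes a feature matrix and a label vector, much like torch.utils.data.TensorDataset. A minimal sketch (the float64 dtype is an assumption matching the .double() model above):

import torch
from torch.utils.data import Dataset

class MyDataset(Dataset):
    """Hypothetical (features, labels) pair dataset."""
    def __init__(self, features, labels):
        self.features = torch.as_tensor(features, dtype=torch.float64)
        self.labels = torch.as_tensor(labels, dtype=torch.long)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]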
Example no. 8

import argparse

import chainer
from chainer import serializers, training
from chainer.training import extensions


def main():
    parser = argparse.ArgumentParser(description='Train custom dataset')
    parser.add_argument('--batchsize', '-b', type=int, default=10,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=50,
                        help='Number of units')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train.
    # The model reports its loss at every iteration, which will be used by
    # the PrintReport extension below.
    model = MyMLP(args.unit)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)

    # Load the dataset and separate to train data and test data
    dataset = MyDataset('data/my_data.csv')
    train_ratio = 0.7
    train_size = int(len(dataset) * train_ratio)
    train, test = chainer.datasets.split_dataset_random(dataset, train_size, seed=13)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot at each epoch
    #trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
    trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))

    # Plot graph for loss for each epoch
    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(
            ['main/loss', 'validation/main/loss'],
            x_key='epoch', file_name='loss.png'))
    else:
        print('Warning: PlotReport is not available in your environment')
    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
    serializers.save_npz('{}/mymlp.model'.format(args.out), model)
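Both this example and Example no. 5 rely on a MyMLP link that is never shown. Since the trainer uses the model directly (no Classifier wrapper) and plots main/loss, a plausible sketch is an MLP that computes and reports its own loss; the layer sizes, the single regression output, and the mean-squared-error loss are all assumptions:

import chainer
import chainer.functions as F
import chainer.links as L

class MyMLP(chainer.Chain):
    """Hypothetical MLP that reports its own loss, so it can be used
    directly by the StandardUpdater and Evaluator above."""
    def __init__(self, n_units):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_units)  # input size inferred at first call
            self.l2 = L.Linear(None, n_units)
            self.l3 = L.Linear(None, 1)

    def predict(self, x):
        h = F.relu(self.l1(x))
        h = F.relu(self.l2(h))
        return self.l3(h)

    def __call__(self, x, t):
        loss = F.mean_squared_error(self.predict(x), t)
        chainer.report({'loss': loss}, self)
        return loss

Example no. 5 additionally calls predict2, whose return convention (inputs, predictions, targets) can be inferred from the plotting code there but is omitted here.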
Example no. 9
import argparse
import random

import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
from torch.utils.data import DataLoader

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--epoch', type=int, default=20)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--label_num', type=int, default=6)
parser.add_argument('--seed', type=int, default=2020)
args = parser.parse_args()

# Set the random seeds for reproducibility
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

training_set = MyDataset(url_train_data)

train_loader = data.DataLoader(dataset=training_set,
                               batch_size=args.batch_size,
                               shuffle=True)

valid_data = MyDataset(url_valid_data)
valid_loader = DataLoader(dataset=valid_data, batch_size=args.batch_size)

config = Config()
model = Transformer(config)

if torch.cuda.is_available():
    model.cuda()

criterion = nn.CrossEntropyLoss()
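MyDataset here feeds a Transformer classifier with label_num = 6 classes, but its input format is never shown. A rough sketch, assuming each line of the data file is "label<TAB>space-separated token ids" (the file format, max_len, and the padding id 0 are all assumptions):

import torch
from torch.utils.data import Dataset

class MyDataset(Dataset):
    """Hypothetical text-classification dataset: each line is
    'label<TAB>id id id ...' (format assumed, not shown in the snippet)."""
    def __init__(self, path, max_len=128):
        self.samples = []
        with open(path, encoding='utf-8') as f:
            for line in f:
                label, ids = line.rstrip('\n').split('\t')
                ids = [int(i) for i in ids.split()][:max_len]
                ids += [0] * (max_len - len(ids))  # pad to a fixed length
                self.samples.append((torch.tensor(ids), int(label)))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]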
Example no. 10
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models, transforms

data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

train_loader = DataLoader(
    MyDataset('../256_ObjectCategories/', '../data_additional/train.txt', data_transforms['train']),
    batch_size=TRAIN_BATCH_SIZE, shuffle=True, **kwargs
)
test_loader = DataLoader(
    MyDataset('../256_ObjectCategories/', '../data_additional/test.txt', data_transforms['val']),
    batch_size=TEST_BATCH_SIZE, shuffle=False, **kwargs
)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# DenseNet121
model_ft = models.densenet121(pretrained=True)
for param in model_ft.parameters():
    param.requires_grad = False
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, 257)
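MyDataset here follows the same file-list pattern sketched under Example no. 1, except that the dataset root and the split file are passed separately: an implementation would presumably join the root with each relative path read from train.txt or test.txt. The 257 output classes match Caltech-256 plus its clutter category. TRAIN_BATCH_SIZE, TEST_BATCH_SIZE, and kwargs are defined outside the excerpt.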
Example no. 11
from collections import OrderedDict


def copyStateDict(state_dict):
    # Strip the "module." prefix that nn.DataParallel adds, if present
    if list(state_dict.keys())[0].startswith("module"):
        start_idx = 1
    else:
        start_idx = 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = ".".join(k.split(".")[start_idx:])
        new_state_dict[name] = v
    return new_state_dict


if __name__ == '__main__':
    """参数设置"""
    # device = 'cuda' # cpu 或 cuda
    device = 'cpu'  # cpu 或 cuda
    dataset_path = './data'  # 自己数据集的路径
    pretrained_path = './pretrained/craft_mlt_25k.pth'  # 预训练模型的存放路径
    model_path = './models'  # 现在训练的模型要存储的路径

    dataset = MyDataset(dataset_path)
    loader = DataLoader(dataset, batch_size=1, shuffle=True)
    net = CRAFT(phase='train').to(device)
    net.load_state_dict(
        copyStateDict(torch.load(pretrained_path, map_location=device)))
    criterion = nn.MSELoss(reduction='sum').to(device)  # size_average=False in older PyTorch
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       net.parameters()),
                                lr=1e-7,
                                momentum=0.95,
                                weight_decay=0)
    if not os.path.exists(model_path):
        os.mkdir(model_path)

    for epoch in range(500):
        epoch_loss = 0