Example #1
def main():
    IMAGE_PATH = "/home/gonken2019/Desktop/subProject/images"  #
    LABELS_PATH = "/home/gonken2019/Desktop/subProject/labels/"  #
    BATCH_SIZE = 512
    #BATCH_SIZE = 10
    #RuntimeError: size mismatch, m1: [10 x 12544], m2: [9216 x 4096] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:197
    #9216×4096=37748736
    #37748736÷12544=3009.306122449
    #4096=2**12

    #BATCH_SIZE = 8
    #RuntimeError: size mismatch, m1: [8 x 12544], m2: [9216 x 4096] at /pytorch/aten/src/TH/generic/THTensorMath.cpp:197

    NUM_EPOCH = 50  # 20-30 at most

    if torch.cuda.is_available():
        device = "cuda"
        print("[Info] Use CUDA")
    else:
        device = "cpu"
    model = AlexNet()
    dataloaders = Dataloaders(IMAGE_PATH, LABELS_PATH, BATCH_SIZE)

    # optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    optimizer = torch.optim.AdamW(model.parameters(),
                                  lr=0.001,
                                  weight_decay=5e-4)
    # Loss going NaN is common; when it happens, try a different optimizer or learning rate.

    trainer = MyTrainer(model, dataloaders, optimizer, device)

    trainer.run(NUM_EPOCH)
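A note on the size-mismatch comments above: the error m1: [B x 12544], m2: [9216 x 4096] comes from the first fully connected layer, not from BATCH_SIZE; the conv stack emits 12544 features per image while the classifier expects 9216. A minimal sketch of one way to avoid hard-coding that number (it assumes this AlexNet exposes its conv stack as `features`; the input size is an assumption):

import torch

def infer_flat_features(feature_extractor, input_size=(3, 224, 224)):
    # Run one dummy image through the conv stack and count its outputs.
    with torch.no_grad():
        dummy = torch.zeros(1, *input_size)
        out = feature_extractor(dummy)
    return out.view(1, -1).size(1)

# e.g. nn.Linear(infer_flat_features(model.features), 4096)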
Example #2
def main():
    IMAGE_PATH = "/home/gonken2019/Desktop/subProject/dataset45"  #"/home/gonken2019/Desktop/subProject/images"#
    LABELS_PATH = "/home/gonken2019/Desktop/subProject/poseData45/"  #"/home/gonken2019/Desktop/subProject/labels/"#
    BATCH_SIZE = 256  # change this together with the first argument on lines 85 and 113 of submodel.py
    NUM_EPOCH = 20  # 20-30 at most

    if torch.cuda.is_available():
        device = "cuda"
        print("[Info] Use CUDA")
    else:
        device = "cpu"
    model1 = AlexNet()
    model2 = PositionNet()
    dataloaders = Dataloaders(IMAGE_PATH, LABELS_PATH, BATCH_SIZE)

    # optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    optimizer1 = torch.optim.AdamW(model1.parameters(),
                                   lr=0.00001,
                                   weight_decay=5e-4)
    optimizer2 = torch.optim.AdamW(model2.parameters(),
                                   lr=0.0001,
                                   weight_decay=5e-4)
    # Loss going NaN is common; when it happens, try a different optimizer or learning rate.

    trainer1 = MyTrainer(model1, dataloaders, optimizer1, device,
                         "Classification")
    trainer2 = MyTrainer(model2, dataloaders, optimizer2, device, "Regression")

    trainer1.run(NUM_EPOCH)
    trainer2.run(NUM_EPOCH)
Example #3
def main():
    with open("config.json", "r") as f:
        config = json.load(f)

    # Load Cifar
    data = DataLoader(config)

    # Create AlexNet model
    net = AlexNet(config)

    # pdb.set_trace()

    # Create trainer
    trainer = Trainer(net.model, data, config)

    # Train model
    trainer.train()

    # Save model weights
    trainer.save_model()

    # Load weights
    net.model = trainer.load_model(config["load_model"])

    # Evaluate on the test set
    trainer.evaluate()
Example #4
def eval(args):
    device = torch.device(f"cuda:{args.device_id}")
    model = AlexNet(n_cls=100)
    model.to(device)
    model.load_state_dict(torch.load(args.pretrained_path))
    model.eval()

    test_loader = getLoaders(split="eval", batch_size=args.batch_size, num_workers=args.num_workers)

    pred_arr = []
    label_arr = []
    with torch.no_grad():
        for idx, (img, label) in tqdm(enumerate(test_loader), total=len(test_loader)):
            img = img.to(device)
            pred = model.pred(img)
            # mean of softmax prob from 10 different aug
            pred = pred.view(-1, 10, 100)
            pred = pred.mean(dim=1)
            pred_arr.append(pred.detach().cpu().numpy())
            label_arr.append(label.detach().numpy())
    pred_np = np.concatenate(pred_arr)
    label_np = np.concatenate(label_arr)
    top_1 = utils.top_k_acc(k=1, pred=pred_np, label=label_np)
    top_5 = utils.top_k_acc(k=5, pred=pred_np, label=label_np)
    confusion = utils.confusion_matrix(100, pred_np, label_np)
    torch.save({
        "top_1": top_1,
        "top_5": top_5,
        "confusion": confusion,
    }, "result.pth")
    print(f"top_1: {top_1*100:.2f}, top_5: {top_5*100:.2f}")
Example #5
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # load image
    img_path = "/home/zhongsy/datasets/imgs_2/2.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path)

    plt.imshow(img)
    # [N, C, H, W]
    img = data_transform(img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(
        json_path)

    json_file = open(json_path, "r")
    class_indict = json.load(json_file)

    # create model
    model = AlexNet(num_classes=2).to(device)

    # load model weights (this .pth stores the whole pickled model, so
    # torch.load replaces the AlexNet instance built above; use the commented
    # load_state_dict path for a weights-only checkpoint)
    weights_path = "./AlexNet.pth"
    # assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    # model.load_state_dict(torch.load(weights_path))
    model = torch.load(weights_path)

    model.eval()
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print_res = "class: {}   prob: {:.3}".format(
        class_indict[str(predict_cla)], predict[predict_cla].numpy())
    plt.title(print_res)
    print(print_res)
    plt.show()
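Two loading conventions appear across these examples: whole-model pickles (torch.save(model, path) / torch.load(path)), as used here, and weights-only checkpoints (torch.save(model.state_dict(), path) / model.load_state_dict(...)). A minimal sketch of the weights-only round trip, which is the more portable of the two (the path is a placeholder):

import torch

def save_weights(model, path="weights.pth"):
    torch.save(model.state_dict(), path)

def load_weights(model, path="weights.pth", device="cpu"):
    model.load_state_dict(torch.load(path, map_location=device))
    return model.eval()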
Example #6
    def load_model(self):
        if self.cuda:
            self.device = torch.device('cuda:0')
            cudnn.benchmark = True
        else:
            self.device = torch.device('cpu')

        # self.model = LeNet().to(self.device)
        self.model = AlexNet().to(self.device)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                        milestones=[75, 150],
                                                        gamma=0.5)
        self.criterion = nn.CrossEntropyLoss().to(self.device)
Example #7
def main():
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100


    trainset = dataloader(root=args.dataroot, train=True, download=True, transform=transform_train)
    trainloader = data.DataLoader(dataset=trainset, batch_size=args.train_batch, shuffle=False)

    testset = dataloader(root=args.dataroot, train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format("Alexnet"))
    model = AlexNet(num_classes=num_classes)
    model = model.cuda() 
    print('Model on cuda')
    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)


    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        print('Epoch[{}/{}]: LR: {:.3f}, Train loss: {:.5f}, Test loss: {:.5f}, Train acc: {:.2f}, Test acc: {:.2f}.'.format(
            epoch + 1, args.epochs, state['lr'], train_loss, test_loss, train_acc, test_acc))
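adjust_learning_rate, state, and use_cuda come from the surrounding script and are not shown; a minimal sketch of the step-decay schedule such a helper typically implements (the milestone epochs and gamma here are assumptions, not the original values):

def adjust_learning_rate(optimizer, epoch, milestones=(81, 122), gamma=0.1):
    # Multiply the learning rate by gamma when a milestone epoch is reached.
    if epoch in milestones:
        for group in optimizer.param_groups:
            group['lr'] *= gamma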
Example #8
def main():
    _, _, test_x, test_y, label2name = cifar_10_data(FLAGS.data_dir)
    test_x = test_x[0:1000]
    test_y = test_y[0:1000]
    with tf.name_scope("input_data"):
        X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='input')

    predict = AlexNet(X)
    with tf.name_scope("output_data"):
        Y = tf.placeholder(tf.int32, [None])
        Y_onehot = tf.cast(tf.one_hot(Y, 10, 1, 0), tf.float32)

    is_correct = tf.equal(tf.argmax(predict, 1), tf.argmax(Y_onehot, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_onehot, logits=predict))
    with tf.Session() as sess:
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
        ckpt = tf.train.get_checkpoint_state("checkpoint/")
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sess.run(tf.global_variables_initializer())
        var_test_acc, var_predict, var_loss = sess.run([accuracy, predict, loss], feed_dict={X: test_x, Y: test_y})
        print('acc: %.5f' % var_test_acc)
        print(var_predict, var_loss)
Example #9
def main():
    parser = argparse.ArgumentParser(description="cifar-10 with PyTorch")
    parser.add_argument('--lr',
                        default=0.001,
                        type=float,
                        help='learning rate')
    parser.add_argument('--epoch',
                        default=200,
                        type=int,
                        help='number of epochs to train for')
    parser.add_argument('--trainBatchSize',
                        default=100,
                        type=int,
                        help='training batch size')
    parser.add_argument('--testBatchSize',
                        default=100,
                        type=int,
                        help='testing batch size')
    parser.add_argument('--cuda',
                        default=torch.cuda.is_available(),
                        type=bool,
                        help='whether cuda is in use')
    args = parser.parse_args()
    # fc 2layer
    #    prune_all_fc_layers(args)
    solver = Solver(args)
    solver.load_data()
    solver.model = AlexNet()
    _, acc, _ = solver.test()
    print(acc)
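One pitfall in this snippet: argparse's type=bool does not parse strings, so --cuda False is still truthy and enables CUDA. A sketch of the usual fix (the flag names are illustrative):

import argparse

parser = argparse.ArgumentParser()
# store_true/store_false flags take no value on the command line.
parser.add_argument('--cuda', action='store_true', help='enable CUDA')
parser.add_argument('--no-cuda', dest='cuda', action='store_false')
parser.set_defaults(cuda=True)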
Example #10
def main():
    print(f"Train numbers:{len(dataset)}")

    # first train run this line
    model = AlexNet().to(device)
    # Load model
    # if device == 'cuda':
    #     model = torch.load(MODEL_PATH + MODEL_NAME).to(device)
    # else:
    #     model = torch.load(MODEL_PATH + MODEL_NAME, map_location='cpu')
    # loss criterion
    criterion = torch.nn.CrossEntropyLoss().to(device)
    # Optimization
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=LEARNING_RATE,
                                 weight_decay=1e-8)
    step = 1
    for epoch in range(1, NUM_EPOCHS + 1):
        model.train()

        # cal one epoch time
        start = time.time()

        for images, labels in dataset_loader:
            images = images.to(device)
            labels = labels.to(device)

            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(f"Step [{step * BATCH_SIZE}/{NUM_EPOCHS * len(dataset)}], "
                  f"Loss: {loss.item():.8f}.")
            step += 1

        # cal train one epoch time
        end = time.time()
        print(f"Epoch [{epoch}/{NUM_EPOCHS}], " f"time: {end - start} sec!")

        # Save the model checkpoint
        torch.save(model, MODEL_PATH + '/' + MODEL_NAME)
    print(f"Model save to {MODEL_PATH + '/' + MODEL_NAME}.")
Example #11
def main():
    args = check_args()

    # prepare dataset
    c10_train, c10_test = chainer.datasets.cifar.get_cifar10()
    train = CIFAR10Datset(c10_train, random=True)
    test = CIFAR10Datset(c10_test, random=False)

    train_iter = chainer.iterators.MultiprocessIterator(train, args.batchsize)
    test_iter = chainer.iterators.MultiprocessIterator(test, args.batchsize,
                                                       repeat=False,
                                                       shuffle=False)

    # setup model
    alexnet = AlexNet()
    serializers.load_npz('data/bvlc_alexnet.npz', alexnet)

    model = SSDH(alexnet, n_units=args.units)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    eval_model = model.copy()
    eval_model.train = False

    # setup optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=0.001, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    # setup trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'),
                               out=args.out)

    trainer.extend(extensions.Evaluator(test_iter, eval_model,
                                        device=args.gpu))
    trainer.extend(extensions.snapshot())
    trainer.extend(extensions.snapshot_object(
                   model, 'model_{.updater.epoch}.npz'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch',
         'main/cls-loss', 'validation/main/cls-loss',
         'main/binary-loss', 'validation/main/binary-loss',
         'main/50%-loss', 'validation/main/50%-loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())

    # lr_policy: "step", stepsize=25000, gamma=0.1
    shifts = [(25000, 0.0001)]
    trainer.extend(StepShift('lr', shifts, optimizer))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # start training
    trainer.run()
Example #12
def test():
    if torch.cuda.device_count() > 1:
        model = torch.nn.parallel.DataParallel(
            AlexNet(num_classes=opt.num_classes))
    else:
        model = AlexNet(num_classes=opt.num_classes)
    model.load_state_dict(
        torch.load(MODEL_PATH, map_location=lambda storage, loc: storage))
    model.to(device)

    # init value
    correct1 = 0.
    correct5 = 0.
    total = len(test_dataloader.dataset)
    with torch.no_grad():
        for i, data in enumerate(test_dataloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, targets = data
            inputs = inputs.to(device)
            targets = targets.to(device)

            outputs = model(inputs)

            # cal top 1 accuracy
            prec1 = outputs.argmax(dim=1)
            correct1 += torch.eq(prec1, targets).sum().item()

            # cal top 5 accuracy
            maxk = 5  # the original max((1, 2)) computed top-2, not top-5
            targets_resize = targets.view(-1, 1)
            _, prec5 = outputs.topk(maxk, 1, True, True)
            correct5 += torch.eq(prec5, targets_resize).sum().item()

    return correct1 / total, correct5 / total
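The top-5 count above works by broadcasting: outputs.topk(maxk, 1) returns the best class indices per row ([B, maxk]), and torch.eq against targets.view(-1, 1) ([B, 1]) marks at most one hit per row, so the sum counts samples whose label is among the top predictions. The same idea as a compact, shape-annotated sketch:

import torch

def topk_correct(outputs, targets, k=5):
    # outputs: [B, n_cls] scores; targets: [B] class indices
    _, pred = outputs.topk(k, dim=1)     # [B, k]
    hits = pred.eq(targets.view(-1, 1))  # [B, k] bool, at most one True per row
    return hits.sum().item()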
Example #13
def test(imageFolder):  # evaluation
    is_parameter = False  # False: load the whole pickled model; True: load a parameter (state_dict) file
    if is_parameter:
        net = AlexNet()
        state_dict = torch.load('./model_parameter.pth',
                                map_location=torch.device(device))  # weights-only file
        net.load_state_dict(state_dict)
    else:
        net = torch.load('./model.pkl', map_location=torch.device(device))
    net = net.to(device)
    torch.set_grad_enabled(False)
    net.eval()
    dataset = MyDataSet(imageFolder, data_transform)
    for i in range(len(dataset)):
        img, ori, name = dataset[i]
        out = net(img.to(device, torch.float))
        predict = out.argmax(dim=1)  # predicted label
        probability = out[:, predict]  # score for that label
        s = 'Predict result: This is a '
        if predict == 0:
            s += 'CAT'
        else:
            s += 'DOG'
        s += ' with the probability of '
        s += str(round(float(probability), 4))
        plt.title(s)
        plt.imshow(ori)
        plt.savefig("./result/" + name.replace('.jpg', '') + ".png",
                    dpi=300)  # save each result into the result folder
        plt.show()  # display the image
        print(name + ' Success!')
Example #14
def main():
    train_x, train_y, test_x, test_y, label2name = cifar_10_data(
        FLAGS.data_dir)

    test_x = test_x[0:200]
    test_y = test_y[0:200]

    with tf.name_scope("input_data"):
        X = tf.placeholder(tf.float32, [None, 32, 32, 3], name='input')
    # predict = ResNet34(X)
    predict = AlexNet(X)
    with tf.name_scope("output_data"):
        Y = tf.placeholder(tf.int32, [None])
        Y_onehot = tf.cast(tf.one_hot(Y, 10, 1, 0), tf.float32)

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y_onehot,
                                                logits=predict))
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)
    is_correct = tf.equal(tf.argmax(predict, 1), tf.argmax(Y_onehot, 1))
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

    with tf.Session() as sess:
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
        ckpt = tf.train.get_checkpoint_state("checkpoint/")
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sess.run(tf.global_variables_initializer())
        starttime = time()
        for ep in range(0, FLAGS.train_iter):
            cur_loss = 0
            cur_acc = 0
            for images, labels in getbatch(train_x, train_y, FLAGS.batch_size):
                # run the train op too; without it the loop never updates the weights
                _, var_loss, var_pred = sess.run([optimizer, loss, predict],
                                                 feed_dict={
                                                     X: images,
                                                     Y: labels
                                                 })
                cur_loss += var_loss
                var_train_acc = sess.run(accuracy,
                                         feed_dict={
                                             X: images,
                                             Y: labels
                                         })
                cur_acc += var_train_acc
            var_test_acc = sess.run(accuracy, feed_dict={X: test_x, Y: test_y})

            if ep % FLAGS.show_step == 0:
                print(time() - starttime)
                starttime = time()
                print(
                    'loss: %.5f train acc: %.5f acc: %.5f' %
                    (cur_loss / train_x.shape[0] * FLAGS.batch_size, cur_acc /
                     train_x.shape[0] * FLAGS.batch_size, var_test_acc))
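getbatch and cifar_10_data are helpers from the surrounding project; a minimal sketch of the mini-batch generator this loop assumes (per-epoch shuffling is an assumption):

import numpy as np

def getbatch(x, y, batch_size):
    # Yield shuffled (images, labels) mini-batches covering one epoch.
    idx = np.random.permutation(len(x))
    for start in range(0, len(x), batch_size):
        sel = idx[start:start + batch_size]
        yield x[sel], y[sel]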
Example #15
def main():
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)
            exit(-1)

    data_root = tf.keras.utils.get_file(
        'flower_photos',
        'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
        untar=True)

    train_ds = tf.keras.preprocessing.image_dataset_from_directory(
        str(data_root),
        validation_split=0.2,
        subset="training",
        seed=123,
        image_size=(224, 224),
        batch_size=32)

    val_ds = tf.keras.preprocessing.image_dataset_from_directory(
        str(data_root),
        validation_split=0.2,
        subset="validation",
        seed=123,
        image_size=(224, 224),
        batch_size=32)

    class_names = np.array(train_ds.class_names)

    # create direction for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    # create model
    model = AlexNet()

    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])  # must be 'accuracy' to match the history keys plotted below

    history = model.fit(train_ds, epochs=10, validation_data=val_ds)

    model.save("./save_weights/AlexNet_model")

    # plot the training curves
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0.5, 1])
    plt.legend(loc='lower right')
    plt.show()
Example #16
def main(args=None):
    logging.basicConfig(format='%(levelname)s:%(message)s',
                        level=logging.DEBUG)
    logging.info('AlexNet architecture')

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes',
                        help='Number of classes.',
                        type=int,
                        default=1000)
    parser.add_argument('--pretrained',
                        help='Whether to use pretrained weights.',
                        type=bool,
                        default=True)
    parser.add_argument('--model_url',
                        help='Path to the weights.',
                        default="./pesos/alexnet-owt-4df8aa71.pth")

    opt = parser.parse_args(args)

    # Data
    proc = Preprocessador()

    imagem_url = "./imagens/raposa.jpg"
    imagem = Image.open(imagem_url)
    imagem = proc.executa(imagem)
    #https://jhui.github.io/2018/02/09/PyTorch-Basic-operations/
    imagem = imagem.unsqueeze(0)

    # Instantiate the model
    model = AlexNet(opt.num_classes)
    model.eval()

    # Load the pretrained weights if requested
    if opt.pretrained:
        checkpoint = torch.load(opt.model_url)
        model.load_state_dict(checkpoint)

    # Use the GPU if one is available
    if torch.cuda.is_available():
        model.to('cuda')

    with torch.no_grad():
        saida = model(imagem)

    # Get the top-ranked index
    index = np.argmax(saida[0]).item()
    acuracia = torch.max(saida).item()  # top score (a raw logit, not a probability)

    print(getLabel(index), acuracia)
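Preprocessador and getLabel are project helpers that are not shown; a minimal sketch of the preprocessing this flow assumes, using the standard ImageNet statistics (the class and method names mirror the snippet, the transform choices are assumptions):

from torchvision import transforms

class Preprocessador:
    def __init__(self):
        self.pipeline = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

    def executa(self, imagem):
        # PIL image -> normalized [C, H, W] tensor
        return self.pipeline(imagem)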
Example #17
def _main(data_dir, batch_size, learning_rate, n_epoch):
    '''
    main function
    '''
    # Create dataloader
    dataloaders_dict = create_dataloader(data_dir, batch_size)

    # Detect if we have a GPU available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Create model
    model = AlexNet()
    model = model.to(device)

    # Observe that all parameters are being optimized
    optimizer_ft = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()

    model = train_model(model, dataloaders_dict, criterion, optimizer_ft,
                        device, n_epoch)

    torch.save(model, 'model.pt')
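create_dataloader and train_model are defined elsewhere; a minimal sketch of the dataloader factory this main assumes, built on torchvision's ImageFolder (the train/val directory layout and the transforms are assumptions):

import os
import torch
from torchvision import datasets, transforms

def create_dataloader(data_dir, batch_size):
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor()])
    loaders = {}
    for split in ("train", "val"):
        ds = datasets.ImageFolder(os.path.join(data_dir, split), transform=transform)
        loaders[split] = torch.utils.data.DataLoader(
            ds, batch_size=batch_size, shuffle=(split == "train"))
    return loaders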
Example #18
def main():
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100


    trainset = dataloader(root=args.dataroot, train=True, download=True, transform=transform_train)
    sampler = torch.utils.data.distributed.DistributedSampler(trainset,num_replicas=hvd.size(), rank=hvd.rank())
    trainloader = data.DataLoader(dataset=trainset, batch_size=args.train_batch * world_size, shuffle=False, sampler=sampler)

    testset = dataloader(root=args.dataroot, train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=args.test_batch * world_size, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format("Alexnet"))
    model = AlexNet(num_classes=num_classes)

    device = torch.device('cuda', local_rank)
    model = model.to(device)
    # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)    
    print('Model on cuda:%d' % local_rank)
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # wrap the optimizer with Horovod
    optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
    # broadcast the initial parameters from rank 0
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        print('Rank:{} Epoch[{}/{}]: LR: {:.3f}, Train loss: {:.5f}, Test loss: {:.5f}, Train acc: {:.2f}, Test acc: {:.2f}.'.format(
            local_rank, epoch + 1, args.epochs, state['lr'], train_loss, test_loss, train_acc, test_acc))
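hvd, local_rank, world_size, state, and use_cuda come from the enclosing script; a minimal sketch of the Horovod setup such a script typically performs before main() runs (the process count in the launch command is a placeholder):

import horovod.torch as hvd
import torch

hvd.init()                    # one process per GPU
local_rank = hvd.local_rank()
world_size = hvd.size()
torch.cuda.set_device(local_rank)

# launched e.g. with: horovodrun -np 4 python train.py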
Example #19
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data_transform = transforms.Compose(  # preprocessing: resize + ToTensor + normalize
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

    # load image
    img_path = ".\\pytorch_classification\\Test2_alexnet.\\dandelion.jpg"  # 载入图片
    assert os.path.exists(img_path), "file: '{}' dose not exist.".format(
        img_path)
    img = Image.open(img_path)

    plt.imshow(img)
    # [N, C, H, W]
    img = data_transform(img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)  # unsqueeze adds a batch dimension
    # (squeeze() is the inverse: it drops dimensions of size 1)

    # read class_indict
    json_path = './class_indices.json'  # json file mapping indices to class names
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(
        json_path)

    json_file = open(json_path, "r")
    class_indict = json.load(json_file)

    # create model
    model = AlexNet(num_classes=5).to(device)  # build the network and move it to the device

    # load model weights
    weights_path = "./AlexNet.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(
        weights_path)
    model.load_state_dict(torch.load(weights_path))  # load the weights

    model.eval()  # eval mode (turns off dropout)
    with torch.no_grad():  # no gradient tracking during inference
        # predict class
        output = torch.squeeze(model(
            img.to(device))).cpu()  # squeeze drops the batch dimension
        predict = torch.softmax(output, dim=0)  # turn logits into a probability distribution
        predict_cla = torch.argmax(predict).numpy()  # index of the highest probability

    print_res = "class: {}   prob: {:.3}".format(
        class_indict[str(predict_cla)],  # 打印类别名称和预测概率
        predict[predict_cla].numpy())
    plt.title(print_res)
    print(print_res)
    plt.show()
Example #20
def main():
    # load model
    model = AlexNet()
    print('Loading pretrained model from {0}'.format(MODEL_PATH))
    chainer.serializers.load_hdf5(MODEL_PATH, model)

    # prepare net input

    print('Loading image from {0}'.format(IMAGE_PATH))
    img = scipy.misc.imread(IMAGE_PATH, mode='RGB')
    img = scipy.misc.imresize(img, (227, 227))
    img_in = img.copy()

    img = img[:, :, ::-1]  # RGB -> BGR
    img = img.astype(np.float32)
    mean_bgr = np.array([104, 117, 123], dtype=np.float32)
    img -= mean_bgr

    x_data = np.array([img.transpose(2, 0, 1)])
    x = chainer.Variable(x_data, volatile='ON')

    # infer
    model(x)
    score = model.score.data[0]

    # visualize result

    likelihood = np.exp(score) / np.sum(np.exp(score))
    argsort = np.argsort(score)

    print('Loading label_names from {0}'.format(SYNSET_PATH))
    with open(SYNSET_PATH, 'r') as f:
        label_names = np.array([line.strip() for line in f.readlines()])

    print('Likelihood of top5:')
    top5 = argsort[::-1][:5]
    for index in top5:
        print('  {0:5.1f}%: {1}'.format(likelihood[index] * 100,
                                        label_names[index]))

    img_viz = draw_image_classification_top5(img_in, label_names[top5],
                                             likelihood[top5])
    plt.imshow(img_viz)
    plt.axis('off')
    plt.tight_layout()
    plt.show()
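The likelihood line exponentiates raw scores directly, which can overflow for large logits; the numerically stable variant subtracts the maximum score first. A one-function sketch of the same computation:

import numpy as np

def softmax(score):
    # Shifting by the max changes nothing mathematically but avoids overflow.
    e = np.exp(score - np.max(score))
    return e / e.sum()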
Example #21
def main():
    parser = argparse.ArgumentParser(description='AlexNet for MNIST')
    parser.add_argument('--batch_size', '-b', type=int, default=100)
    parser.add_argument('--epoch', '-e', type=int, default=30)
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--out', '-o', type=str, default='result')
    args = parser.parse_args()

    train_ds, test_ds = chainer.datasets.get_mnist(ndim=3)
    train_iter = iterators.SerialIterator(train_ds, args.batch_size)
    test_iter = iterators.SerialIterator(test_ds,
                                         args.batch_size,
                                         repeat=False,
                                         shuffle=False)

    model = L.Classifier(AlexNet())
    if args.gpu >= 0:
        model.to_gpu(args.gpu)

    optimizer = optimizers.Adam(alpha=0.0001)
    optimizer.setup(model)

    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              'epoch',
                              file_name='loss.png'))
    trainer.extend(
        extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                              'epoch',
                              file_name='accuracy.png'))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Example #22
def get_prediction(image_bytes):
    # exception handling: guard against non-image input
    try:
        weights_path = "./Alexnet.pth"
        class_json_path = "./class_indices.json"
        assert os.path.exists(weights_path), "weights path does not exist..."
        assert os.path.exists(class_json_path), "class json path does not exist..."

        # select device
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print(device)
        # create model
        model = AlexNet(num_classes=5)
        # load model weights
        model.load_state_dict(torch.load(weights_path, map_location=device))
        model.to(device)
        model.eval()

        # load class info
        json_file = open(class_json_path, 'rb')
        class_indict = json.load(json_file)


        tensor = transform_image(image_bytes=image_bytes)
        outputs = torch.softmax(model.forward(tensor).squeeze(), dim=0)
        # detach drops the gradient information
        prediction = outputs.detach().cpu().numpy()
        # '<' left-aligns the field
        template = "class:{:<15} probability:{:.3f}"
        index_pre = [(class_indict[str(index)], float(p)) for index, p in enumerate(prediction)]
        # sort probability
        index_pre.sort(key=lambda x: x[1], reverse=True)
        text = [template.format(k, v) for k, v in index_pre]
        return_info = {"result": text}
    except Exception as e:
        return_info = {"result": [str(e)]}
    return return_info
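transform_image is not shown; a minimal sketch of the bytes-to-batched-tensor conversion this handler assumes (the transform choices are assumptions):

import io
import torch
from PIL import Image
from torchvision import transforms

def transform_image(image_bytes):
    pipeline = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    img = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    return pipeline(img).unsqueeze(0)  # [1, C, H, W]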
Example #23
def main():
    global actor_critic, directory, weight
    num_cls = args.wave_num * args.k + 1  # every route/wavelength combination, plus the do-nothing choice

    if args.append_route.startswith("True"):
        channel_num = args.wave_num+args.k
    else:
        channel_num = args.wave_num

    # parse the weight argument
    if args.weight.startswith('None'):
        weight = None
    else:
        weight = args.weight

    # in CNN learning mode the observation shape should be CHW
    assert args.mode.startswith('learning')
    # model initialization
    if args.cnn.startswith('mobilenetv2'):
        actor_critic = MobileNetV2(in_channels=channel_num, num_classes=num_cls, t=6)
    elif args.cnn.startswith('simplenet'):
        actor_critic = SimpleNet(in_channels=channel_num, num_classes=num_cls)
    elif args.cnn.startswith('simplestnet'):
        actor_critic = SimplestNet(in_channels=channel_num, num_classes=num_cls)
    elif args.cnn.startswith('alexnet'):
        actor_critic = AlexNet(in_channels=channel_num, num_classes=num_cls)
    elif args.cnn.startswith('squeezenet'):
        actor_critic = SqueezeNet(in_channels=channel_num, num_classes=num_cls, version=1.0)
    else:
        raise NotImplementedError

    times = 1  # number of repetitions
    prefix = "trained_models"
    directory = os.path.join(prefix, 'a2c', args.cnn, args.step_over)

    if args.comp.startswith("states"):
        all_states_comp()
    elif args.comp.startswith("random"):
        random_comp(times=times)
    elif args.comp.startswith("None"):
        raise ValueError("Wrong call for this script")
    else:
        raise NotImplementedError
Example #24
    def __init__(self):
        self._input_shape = (224, 224, 3)
        self._output_dim = 2

        model = AlexNet(self._input_shape, self._output_dim).get_model()
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
        model.compile(loss=tf.keras.losses.categorical_crossentropy,
                      optimizer=optimizer,
                      metrics=["accuracy"])
        reduce_lro_n_plat = ReduceLROnPlateau(monitor='val_loss',
                                              factor=0.8,
                                              patience=10,
                                              verbose=1,
                                              mode='auto',
                                              min_delta=0.0001,
                                              cooldown=5,
                                              min_lr=1e-10)
        early = EarlyStopping(monitor="val_loss", mode="min", patience=20)

        data_gen = ImageDataGenerator()
        train_it = data_gen.flow_from_directory('data/all/train/',
                                                target_size=(224, 224))
        val_it = data_gen.flow_from_directory('data/all/validation/',
                                              target_size=(224, 224))

        callbacks_list = [early, reduce_lro_n_plat]

        try:
            model.fit(train_it,
                      batch_size=32,
                      epochs=10000,
                      validation_data=val_it,
                      callbacks=callbacks_list,
                      verbose=1)
        except KeyboardInterrupt:
            pass

        model.save_weights("data/model.h5")
        tfjs.converters.save_keras_model(model, "data/model")
Example #25
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # /home/xulei/数据集大本营/5_flower_data
    img_path = "/home/xulei/数据集大本营/5_flower_data/predict/rose01.jpeg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path)
    plt.imshow(img)
    img = data_transform(img)
    img = torch.unsqueeze(img, dim=0)

    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file '{}' does not exist.".format(
        json_path)

    json_file = open(json_path, 'r')
    class_indict = json.load(json_file)
    model = AlexNet(num_classes=5).to(device)

    # load model weights
    weights_path = "./AlexNet.pth"
    assert os.path.exists(weights_path), "file '{}' does not exist.".format(
        weights_path)
    model.load_state_dict(torch.load(weights_path))

    model.eval()
    with torch.no_grad():
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print_res = "class: {} prob: {:.3f}".format(class_indict[str(predict_cla)],
                                                predict[predict_cla].numpy())
    plt.title(print_res)
    print(print_res)
    plt.show()
Example #26
    def loadModel(self):
        """Load the selected model file."""

        default_dir = os.getcwd()
        modelPath = askopenfilename(
            title='Select a model file',
            initialdir=(os.path.expanduser(default_dir)),
            filetypes=[('pth files', '*.pth'), ('All Files', '*')])
        if modelPath == "":
            return

        try:
            self.label_info.config(text="Loading model...")
            model = AlexNet(num_classes=5)
            model.load_state_dict(torch.load(modelPath))
            model.eval()
            self.model = model
        except Exception as e:
            self.label_info.config(text="Failed to load the model")
        finally:
            # note: this runs even after an error and overwrites the message above
            self.button_loadImage.config(state=tk.NORMAL)
            self.label_info.config(text="Please open an image")
Example #27
def model_retrain(number_class):
    model = AlexNet(number_class)
    model = model.to(device)
    return model
Example #28
from losss import BatchHardTripletLoss

################## config ################
device = torch.device("cuda:7")
date = time.strftime("%m-%d", time.localtime())
#date = "03-14"

model_path = "/home/lxd/checkpoints/" + date

model_name = sys.argv[1]
if model_name == "vgg16":
    model = Vgg16Net()
elif model_name == "mobile":
    model = MobileNet()
elif model_name == "alexnet":
    model = AlexNet()
elif model_name == "res50":
    model = ResNet50()
elif model_name == "res34":
    model = ResNet34()
elif model_name == "vgg11":
    model = Vgg11Net()
else:
    raise ValueError("unknown model name: " + model_name)
model.to(device)

# train/test
loss_name = sys.argv[2]
batch = sys.argv[3]

model.eval()
Example #29
fine_tuner = PrunningFineTuner_CNN(model)

opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
fine_tuner.train(optimizer=opt, epoches=10)
opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
fine_tuner.train(optimizer=opt, epoches=15)
opt = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
fine_tuner.train(optimizer=opt, epoches=15)

torch.save(model, "model")
"""
#
# model = torch.load("model").cuda()
# fine_tuner = PrunningFineTuner_CNN(model)
# Acc, Layers_Prunned = fine_tuner.prune()
model = AlexNet().cuda()
model2 = CNN().cuda()
model_dict = load_model_dict('./best_model_new.pkl')
_, test = list(model.features._modules.items())[0]
print(type(test))
print(test.weight.data.cpu().numpy().shape)
# fine_tuner = PrunningFineTuner_CNN(model2)
# acc, layers_pruned = fine_tuner.prune()
# print(acc, layers_pruned)
"""
net = torch.load('model_prunned')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
testloader = dataset.testloader()
Example #30
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        "val":
        transforms.Compose([
            transforms.Resize((224, 224)),  # must be (224, 224); a bare 224 only resizes the short side
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(
        image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))
    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()
    #
    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()
    #
    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_classes=5, init_weights=True)

    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    # pata = list(net.parameters())
    optimizer = optim.Adam(net.parameters(), lr=0.0002)

    epochs = 10
    save_path = './AlexNet.pth'
    best_acc = 0.0
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)

        # validate
        net.eval()
        acc = 0.0  # count of correct predictions this epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')