Example #1
    def __init__(self):
        LeNet.__init__(self, 28, 28)

        # control variable definitions
        self.learning_rate = 0.001
        # records how many training steps have been run
        self.global_step = tf.Variable(0, trainable=False)
        self.x = tf.placeholder(tf.float32, [None, 784])
        self.label = tf.placeholder(tf.float32, [None, 10])
        self.x_image = tf.reshape(self.x, [-1, 28, 28, 1])

        # network layer definitions
        self.layer1(5, 5, 32)
        self.layer2(5, 5, 64)
        self.fullConnLayer(1024)
        self.outputLayer(10)

        # computed-quantity definitions
        # loss: cross entropy (1e-10 is added to avoid log(0))
        self.loss = -tf.reduce_sum(self.label * tf.log(self.y + 1e-10))

        # minimize() accepts a global_step argument; its value is incremented by 1 on every training step,
        # so evaluating the self.global_step tensor tells you how many steps have been trained so far
        self.train = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.loss, global_step=self.global_step)

        predict = tf.equal(tf.argmax(self.label, 1), tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(predict, "float"))
Example #2
def main():
    transform = transforms.Compose([
        transforms.Resize((32, 32)),  # downloaded images are not a fixed size, so resize to 32x32 first
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    net = LeNet()  # instantiate the model
    net.load_state_dict(torch.load('Lenet.pth'))  # load the saved weight file

    im = Image.open('luxing_cat.jpg')  # PIL/numpy usually load images as (height, width, channel), i.e. [H, W, C]
    im = transform(im)  # [C, H, W]
    im = torch.unsqueeze(im, dim=0)  # add a batch dimension at the front (dim=0): [N, C, H, W]

    with torch.no_grad():
        outputs = net(im)
        predict = torch.max(outputs, dim=1)[1].data.numpy()
    print(classes[int(predict)])

    # using softmax
    with torch.no_grad():
        outputs = net(im)
        predict = torch.softmax(outputs, dim=1)  # softmax over dim=1, the class dimension (dim=0 is the batch dimension)
    print(predict)
Example #3
def main():
    transform = transforms.Compose([
        transforms.Resize((32, 32)),  # downloaded images vary in size, so resize to 32x32 first
        transforms.ToTensor(),  # ToTensor() converts an H*W*C numpy array into a C*H*W tensor
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])  # normalization

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    net = LeNet()  # instantiate the model
    net.load_state_dict(torch.load('Lenet.pth'))  # load the saved weight file with load_state_dict

    #im = Image.open('1.jpg')                               # PIL's Image opens the file as H*W*C
    im = Image.open('./pytorch_classification/Test1_official_demo/1.jpg')
    im = transform(im)  # [C, H, W]; the image must be a tensor to go through the network, so transform converts it to C*H*W
    im = torch.unsqueeze(im, dim=0)  # [N, C, H, W]; torch.unsqueeze(..., dim=0) adds a new dimension at index 0

    with torch.no_grad():  # no loss gradients are needed at test time, so use torch.no_grad()
        outputs = net(im)  # feed the image through the network; the output has shape [batch, 10], and only dim=1 matters
        predict = torch.max(outputs, dim=1)[1].data.numpy()  # largest value along dim=1; keep only its position ([1], the index) and convert to numpy
        predict1 = torch.softmax(outputs, dim=1)  # probability distribution over the classes
    print(classes[int(predict)])  # index into classes to get the class name
    print(predict1)  # print the probability distribution
Example #4
def main():
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # 50,000 training images
    # set download=True on first use so the dataset is downloaded automatically
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=36,
                                               shuffle=True, num_workers=0)

    # 10,000 validation images
    # set download=True on first use so the dataset is downloaded automatically
    val_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=False, transform=transform)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=5000,
                                             shuffle=False, num_workers=0)
    val_data_iter = iter(val_loader)
    val_image, val_label = next(val_data_iter)

    # classes = ('plane', 'car', 'bird', 'cat',
    #            'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    net = LeNet()
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    for epoch in range(5):  # loop over the dataset multiple times

        running_loss = 0.0
        for step, data in enumerate(train_loader, start=0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if step % 500 == 499:  # print every 500 mini-batches
                with torch.no_grad():
                    outputs = net(val_image)  # [batch, 10]
                    predict_y = torch.max(outputs, dim=1)[1]
                    accuracy = (predict_y == val_label).sum().item() / val_label.size(0)

                    print('[%d, %5d] train_loss: %.3f  test_accuracy: %.3f' %
                          (epoch + 1, step + 1, running_loss / 500, accuracy))
                    running_loss = 0.0

    print('Finished Training')

    save_path = './Lenet.pth'
    torch.save(net.state_dict(), save_path)
Example #5
def trainModel(EPOCH_NUM=50, save=False, show=False):
    print("Train Start:")
    net = LeNet().to(devices)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.RMSprop(net.parameters(),
                                    lr=LR,
                                    alpha=0.9,
                                    eps=1e-08,
                                    weight_decay=0,
                                    momentum=0,
                                    centered=False)
    #x,trainloss,trainacc,testacc = [],[],[],[]
    batch, batchloss = [], []
    for epoch in range(EPOCH_NUM):
        sum_loss = 0.0
        acc = 0
        iter = 0
        for i, (inputs, labels) in enumerate(trainLoader):
            inputs, labels = inputs.to(devices), labels.to(devices)
            # forward and backward
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            sum_loss += loss.item()
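            # the index of the largest logit along dim=1 is the predicted class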
            _, pred = torch.max(outputs.data, 1)
            acc += (pred == labels).sum()
            iter = iter + 1
            batch.append(i)
            batchloss.append(loss.item())
        if show:
            plt.figure()
            plt.plot(batch, batchloss, 'b')
            plt.title('one epoch')
            plt.xlabel('iteration')
            plt.ylabel('loss')
            plt.show()
    # trainloss.append(sum_loss/iter)
    # trainacc.append(100*acc/len(trainData))
    # x.append(epoch)
        print('Epoch [%d] : loss [%f]' % (epoch + 1, sum_loss / iter))
        print('train accuracy = %f%%' % (100 * acc / len(trainData)))
        #with torch.no_grad():
        #    correct = 0
        #    total = 0
        #    for data in testLoader:
        #        images, labels = data
        #        images, labels = images.to(devices), labels.to(devices)
        #        outputs = net(images)
        #        _, predicted = torch.max(outputs.data, 1)
        #        total += labels.size(0)
        #        correct += (predicted == labels).sum()
        #print('test accuracy = %f%%'%(100*correct/total))
        #testacc.append(100*correct/total)
    if save:
        torch.save(net.state_dict(), 'MNIST_Model.pth')
Example #6
def train_lenet(device, dataset_path):
    train_loader, valid_loader, test_loader = get_data_loaders(dataset_path)

    model = LeNet(35)
    optimizer = optim.Adam(model.parameters(),
                           lr=Consts.lr,
                           weight_decay=Consts.weight_decay)
    loss_criterion = torch.nn.NLLLoss()
    model.apply(weight_init)
    model.to(device)
    train_loss = []
    val_loss = []
    val_acc = []
    for epoch in range(Consts.epochs):
        t_loss = train(model, train_loader, optimizer, loss_criterion, device)
        v_loss, v_acc = evaluation(model, valid_loader, loss_criterion, device)
        torch.save(model.state_dict(), f'models/epoch-{epoch + 1}.pth')
        train_loss.append(t_loss)
        val_loss.append(v_loss)
        val_acc.append(v_acc)
        print(f'train loss in epoch {epoch + 1} is: {t_loss}')
        print(f'validation loss in epoch {epoch + 1} is: {v_loss}')
        print(f'validation accuracy in epoch {epoch + 1} is: {v_acc}')

    plot_loss(train_loss, val_loss, val_acc)
    test_loss, test_acc = test_model(model, test_loader, loss_criterion,
                                     val_loss, device, 'models/')
Example #7
def upload_image():
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']

    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)

    if file and img_files(file.filename):
        filename = secure_filename(file.filename)
        image_source = request.files['file'].stream

        # image_path = './static/images/' + filename
        weight_path = './weight/mnist.pth'

        model = LeNet().to(device)
        image, preds = evaluation(image_source, weight_path, model)

        image = image[0]
        save_image(image, './static/images/' + filename)

        preds = int(preds.cpu())

        print('upload_image filename: ' + filename)

        flash(f'Prediction: {preds}')
        return render_template('upload.html', filename=filename)
    else:
        flash('Image extension must be -> png, jpg, jpeg, gif')
        return redirect(request.url)
Example #8
def main():
    with open("config.json", "r") as f:
        config = json.load(f)

    # Load Cifar data
    data = DataLoader(config)

    # Create LeNet model
    net = LeNet(config)

    # Create trainer
    trainer = Trainer(net.model, data, config)

    # # Train model
    # trainer.train()

    # # Save LeNet model weights
    # trainer.save_weights()

    # Load weights
    load_path = config["trainer"]["save_dir"] + config["experiment_name"] + \
        "/" + config["trainer"]["save_trained_name"] + "_full.hdf5"
    trainer.load_weights(load_path)

    # Evaluate validation set
    trainer.evaluate()
Example #9
def quickdraw_predict():
    quickdraw_animal_map = [
        'ant', 'bat', 'bear', 'bee', 'bird', 'butterfly', 'camel', 'cat',
        'cow', 'dog', 'dolphin', 'dragon', 'duck', 'elephant', 'fish',
        'flamingo', 'frog', 'giraffe', 'hedgehog', 'horse', 'kangaroo', 'lion',
        'lobster', 'mermaid', 'monkey', 'mosquito', 'mouse', 'octopus', 'owl',
        'panda', 'penguin', 'pig', 'rabbit', 'raccoon', 'shark', 'sheep',
        'snail', 'snake', 'spider', 'squirrel', 'teddy-bear', 'tiger', 'whale',
        'zebra'
    ]

    if request.method == 'POST':
        quick_draw = request.form['url']
        quick_draw = quick_draw[init_Base64:]
        quick_draw_decoded = base64.b64decode(quick_draw)

        # Fix later (switch to a PIL-only version)
        # Convert the byte array to a PIL Image
        # imageStream = io.BytesIO(draw_decoded)
        # img = Image.open(imageStream)

        quick_img = np.asarray(bytearray(quick_draw_decoded), dtype="uint8")
        quick_img = cv2.imdecode(quick_img, cv2.IMREAD_GRAYSCALE)
        quick_img = cv2.resize(quick_img, (28, 28),
                               interpolation=cv2.INTER_AREA)
        quick_img = Image.fromarray(quick_img)

        weight_path = './weight/quickdraw_90_animal.pth'
        quick_model = LeNet(num_classes=44).to(device)
        quick_img, quick_pred = quickdraw_evaluation(quick_img, weight_path,
                                                     quick_model)

        quick_pred = int(quick_pred)
        quick_label = quickdraw_animal_map[quick_pred]
    return render_template('draw_quickdraw.html', prediction=quick_label)
Example #10
def mnist_upload_image():
    if request.method == 'POST':
        mnist_f = request.files['file']
        mnist_fname = secure_filename(mnist_f.filename)

        if mnist_fname == '':
            return redirect(url_for('mnist_view'))
        os.makedirs('static', exist_ok=True)
        mnist_f.save(os.path.join('static', mnist_fname))

        mnist_img = Image.open(mnist_f, 'r')

        mnist_img_display = mnist_img.resize(
            (256, 256))  # To display large size image
        mnist_display = 'display_' + mnist_fname
        mnist_img_display.save(os.path.join('static', mnist_display))

        weight_path = './weight/mnist.pth'
        mnist_model = LeNet().to(device)
        mnist_img, mnist_preds = mnist_evaluation(mnist_img, weight_path,
                                                  mnist_model)
        mnist_preds = int(mnist_preds)
    return render_template('upload_mnist.html',
                           num=mnist_preds,
                           filename=mnist_display)
Example #11
def mnist_predict():
    if request.method == 'POST':

        mnist_draw = request.form['url']
        mnist_draw = mnist_draw[init_Base64:]
        mnist_draw_decoded = base64.b64decode(mnist_draw)

        # Fix later (switch to a PIL-only version)
        # Convert the byte array to a PIL Image
        # imageStream = io.BytesIO(draw_decoded)
        # img = Image.open(imageStream)

        mnist_img = np.asarray(bytearray(mnist_draw_decoded), dtype="uint8")
        mnist_img = cv2.imdecode(mnist_img, cv2.IMREAD_GRAYSCALE)
        mnist_img = cv2.resize(mnist_img, (28, 28),
                               interpolation=cv2.INTER_AREA)
        mnist_img = Image.fromarray(mnist_img)

        weight_path = './weight/mnist.pth'
        mnist_model = LeNet().to(device)
        mnist_img, mnist_pred = mnist_evaluation(mnist_img, weight_path,
                                                 mnist_model)

        mnist_pred = int(mnist_pred)
    return render_template('draw_mnist.html', prediction=mnist_pred)
Example #12
def runInference(test_input_x):
    """Test inference for given image"""
    tf.reset_default_graph()
    xs = tf.placeholder(tf.float32, [None, 784], name='input')
    dropout = tf.placeholder(tf.float32, name='dropout')

    #get the model logits
    model_logits = LeNet(xs, dropout)
    output = tf.argmax(tf.contrib.layers.softmax(model_logits), 1)
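    # argmax of the softmax is the predicted class index (softmax is monotonic, so argmax over the raw logits is equivalent)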

    if not os.path.exists(model_ckpt_path):
        raise ValueError('[!] model Checkpoint path does not exist...')

    try:
        with tf.Session() as sess:
            print('[*] Reading checkpoint...')
            saver = tf.train.Saver()
            saver.restore(sess, os.path.join(model_ckpt_path, model_name))
            output = sess.run(output,
                              feed_dict={
                                  xs: test_input_x,
                                  dropout: 0.0
                              })
    except Exception as e:
        print(e)

    return output
Example #13
def get_trace() -> AttrMap:
    return reconstruct_static_trace_from_tf(
        model_fn=lambda: LeNet(),
        input_fn=lambda: tf.placeholder(tf.float32, shape=(1, 1, 28, 28)),
        model_dir=tf.train.latest_checkpoint(abspath("tf/lenet/model/")),
        density=threshold_to_density[threshold],
    )
Example #14
    def __init__(self, args=None):
        super().__init__()

        self.writer = tX.SummaryWriter(log_dir=log_dir, comment='LeNet')
        self.train_logger = None
        self.eval_logger = None
        self.args = args

        self.step = 0
        self.epoch = 0
        self.best_error = float('Inf')

        self.device = torch.device('cpu')

        self.model = LeNet().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
        self.criterion = torch.nn.CrossEntropyLoss()
        self.metric = metric

        transform = tfs.Compose(
            [tfs.ToTensor(),
             tfs.Normalize((0.1307, ), (0.3081, ))])
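        # (the Normalize values above are the commonly used MNIST mean and std)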
        train_dataset = MNIST(root='MNIST',
                              train=True,
                              transform=transform,
                              download=True)
        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)

        test_dataset = MNIST(root='MNIST',
                             train=False,
                             transform=transform,
                             download=True)
        test_loader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 shuffle=False)

        self.train_loader = train_loader
        self.test_loader = test_loader

        self.ckpt_dir = ckpt_dir
        self.log_per_step = log_per_step
        # self.eval_per_epoch = None

        self.check_init()
Example #15
def test(device, test_dataset_path, model_path):
    test_loader = get_test_loader(test_dataset_path)
    loss_criterion = torch.nn.NLLLoss()
    model = LeNet(35)
    if device.type == 'cpu':
        model.load_state_dict(torch.load(model_path, map_location=device))
    else:
        model.load_state_dict(torch.load(model_path))
    model.to(device)
    test_model(model, test_loader, loss_criterion, [], device, None)
Example #16
def Inference(index):
    model = LeNet()
    model.load_state_dict(torch.load('MNIST_Model.pth'))
    img, label = testData[index]
    img = img.unsqueeze(0)
    output = model(img)
    print(output)
    output = output.tolist()[0]
    x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    plt.figure()
    plt.subplot(2, 1, 1)
    plt.imshow(img.reshape(28, 28))
    plt.subplot(2, 1, 2)
    plt.ylim(0, 1)
    plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
    plt.xlim(0, 9)
    plt.bar(x, output)
    plt.show()
Example #17
def main():
    with open("config.json", "r") as f:
        config = json.load(f)

    ## Prepare data
    data = CifarDataset(config)

    all_transforms = transforms.Compose(
        [ToGrayscale(), Normalize(), ToTensor()])

    train_data_transformed = CifarDataLoader(config,
                                             data.X_train,
                                             data.y_train,
                                             transform=all_transforms)
    train_loader = DataLoader(train_data_transformed,
                              batch_size=config["data_loader"]["batch_size"],
                              shuffle=False,
                              num_workers=4)

    valid_loader = None
    if config["validation"]["split"]:
        valid_data_transformed = CifarDataLoader(config,
                                                 data.X_valid,
                                                 data.y_valid,
                                                 transform=all_transforms)
        valid_loader = DataLoader(
            valid_data_transformed,
            batch_size=config["data_loader"]["batch_size"],
            shuffle=False,
            num_workers=4)

    test_data_transformed = CifarDataLoader(config,
                                            data.X_test,
                                            data.y_test,
                                            transform=all_transforms)
    test_loader = DataLoader(test_data_transformed,
                             batch_size=config["data_loader"]["batch_size"],
                             shuffle=False,
                             num_workers=4)

    ## Create neural net
    net = LeNet()

    ## Training
    trainer = Trainer(model=net,
                      config=config,
                      train_data_loader=train_loader,
                      valid_data_loader=valid_loader,
                      test_data_loader=test_loader)
    trainer.train()

    ## Saving model parameters
    trainer.save_model_params()

    ## Evaluate test data
    trainer.evaluate()
Example #18
def main():
    class Args():
        def __init__(self):
            pass

    args = Args()
    args.batch_size = 64
    args.test_batch_size = 2
    args.epochs = 10
    args.lr = 0.0001
    args.momentum = 0.5
    args.no_cuda = False
    args.seed = 1
    args.log_interval = 100
    args.save_model = True

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    # torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    dataset_dir = Path(os.environ['HOME'])/"datasets/mnist"
    train_dataset = MnistDataset(root_dir=dataset_dir/"train", transform=transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
    test_dataset = MnistDataset(root_dir=dataset_dir/"test", transform=transform)
    test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=True, num_workers=4)

    model = LeNet().to(device)
    # model = nn.DataParallel(model)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)

    if (args.save_model):
        torch.save(model.state_dict(), "mnist_cnn.pth")
Example #19
def main(config):
    GPUManager.auto_chooce()
    engine = DefaultClassificationEngine()
    engine.hooks.update(
        dict(
            on_sample=on_sample_hook,
            on_start=on_start_hook,
            on_forward=on_forward_hook,
        )
    )
    net = LeNet(n_channels=config.n_channels, size=config.size)
    print(net)
    if torch.cuda.is_available():
        net = net.cuda()
    train_loader = torch.utils.data.DataLoader(
        config.dataset(train=True),
        batch_size=config.batch_size, shuffle=True, num_workers=8
    )
    val_loader = torch.utils.data.DataLoader(
        config.dataset(train=False),
        batch_size=1000, shuffle=True, num_workers=8)
    optimizer = optim.SGD(net.parameters(), lr=config.lr, momentum=0.9)
    recorder = defaultdict(list)
    recorder.update(
        dict(
            dataset_name=config.dataset.__name__.split("_")[0],
            lr=config.lr,
            batch_size=config.batch_size
        )
    )
    pprint(recorder)
    for epoch in range(config.maxepoch):
        state = engine.train(network=net, iterator=train_loader, maxepoch=1, optimizer=optimizer)
        recorder["train_loss"].append(state["loss_meter"].value())
        recorder["train_acc"].append(state["acc_meter"].value())
        state = engine.validate(network=net, iterator=val_loader)
        recorder["val_loss"].append(state["loss_meter"].value())
        recorder["val_acc"].append(state["acc_meter"].value())
    filename = f"{recorder['dataset_name']}_" + time.strftime("%Y%m%d_%H%M%S", time.localtime())
    with open(f"../result/{filename}.static", "wb") as f:
        pickle.dump(recorder, f)
Example #20
def main():
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    net = LeNet()
    net.load_state_dict(torch.load('Lenet.pth'))

    im = Image.open('1.jpg')
    im = transform(im)  # [C, H, W]
    im = torch.unsqueeze(im, dim=0)  # [N, C, H, W]

    with torch.no_grad():
        outputs = net(im)
        predict = torch.max(outputs, dim=1)[1].data.numpy()
    print(classes[int(predict)])
Example #21
def usually_train():
    net = LeNet()                                       # the network model to train
    loss_function = nn.CrossEntropyLoss()               # cross-entropy loss
    optimizer = optim.Adam(net.parameters(), lr=0.001)  # optimizer (parameters to train, learning rate)

    for epoch in range(5):  # one epoch is one full pass over the training set
        running_loss = 0.0
        time_start = time.perf_counter()

        for step, data in enumerate(train_loader, start=0):  # iterate over the training set, counting steps from 0
            inputs, labels = data   # images and labels for this batch
            optimizer.zero_grad()   # clear the gradients from the previous step

            # forward + backward + optimize
            outputs = net(inputs)                  # forward pass
            loss = loss_function(outputs, labels)  # compute the loss
            loss.backward()                        # backward pass
            optimizer.step()                       # update the parameters

            # report elapsed time, loss and accuracy
            running_loss += loss.item()
            if step % 1000 == 999:     # print every 1000 mini-batches
                with torch.no_grad():  # no loss gradients are needed during validation, which saves memory
                    outputs = net(test_image)                 # run the test set through the network (test_batch_size=10000); output shape is [10000, 10]
                    predict_y = torch.max(outputs, dim=1)[1]  # take the index (label) of the largest output value as the prediction
                    accuracy = (predict_y == test_label).sum().item() / test_label.size(0)

                    print('[%d, %5d] train_loss: %.3f  test_accuracy: %.3f' %  # epoch, step, loss, accuracy
                        (epoch + 1, step + 1, running_loss / 1000, accuracy))

                    print('%f s' % (time.perf_counter() - time_start))  # elapsed time
                    running_loss = 0.0

    print('Finished Training')

    # save the trained parameters
    save_path = './Lenet.pth'
    torch.save(net.state_dict(), save_path)
Example #22
def model_fn(features, labels, mode, params):
    """The model_fn argument for creating an Estimator."""
    model = LeNet(params["data_format"])
    image = features
    if isinstance(image, dict):
        image = features["image"]

    if mode == tf.estimator.ModeKeys.EVAL:
        logits = model(image, training=False)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
                                                      logits=logits)
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={
                "accuracy":
                tf.metrics.accuracy(labels=labels,
                                    predictions=tf.argmax(logits, axis=1))
            },
        )
Example #23
def gpu_train():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    # alternatively:
    # device = torch.device("cuda")
    # or:
    # device = torch.device("cpu")

    net = LeNet()
    net.to(device)  # move the network to the chosen device
    loss_function = nn.CrossEntropyLoss() 
    optimizer = optim.Adam(net.parameters(), lr=0.001) 

    for epoch in range(5): 

        running_loss = 0.0
        time_start = time.perf_counter()
        for step, data in enumerate(train_loader, start=0):
            inputs, labels = data
            optimizer.zero_grad()
            outputs = net(inputs.to(device))                   # move the inputs to the device
            loss = loss_function(outputs, labels.to(device))   # move the labels to the device
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if step % 1000 == 999:    
                with torch.no_grad(): 
                    outputs = net(test_image.to(device))  # move test_image to the device
                    predict_y = torch.max(outputs, dim=1)[1]
                    accuracy = (predict_y == test_label.to(device)).sum().item() / test_label.size(0)  # move test_label to the device

                    print('[%d, %5d] train_loss: %.3f  test_accuracy: %.3f' %
                        (epoch + 1, step + 1, running_loss / 1000, accuracy))

                    print('%f s' % (time.perf_counter() - time_start))
                    running_loss = 0.0

    print('Finished Training')

    save_path = './Lenet.pth'
    torch.save(net.state_dict(), save_path)

Example #24
mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# x_train = np.pad(x_train, ((0,0),(2,2),(2,2)), 'constant')
# x_test = np.pad(x_test, ((0,0),(2,2),(2,2)), 'constant')

# Setup network input and output
tf_x = tf.placeholder(tf.float32, (None, 1, 28, 28))
tf_y = tf.placeholder(tf.int32, (None,))
one_hot_y = tf.one_hot(tf_y, 10)

# Build neural network
logits = LeNet(tf_x)

# Define loss
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=one_hot_y)
loss_mean = tf.reduce_mean(cross_entropy)

# Define optimizer
initial_lr = 0.001
decay_rate = 0.6
decay_steps = 10  # 10 epoches
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(initial_lr,
                                           global_step,
                                           decay_steps,
                                           decay_rate)
Example #25
test_dataset = GCommandLoader(args.test_path,
                              window_size=args.window_size,
                              window_stride=args.window_stride,
                              window_type=args.window_type,
                              normalize=args.normalize)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=args.test_batch_size,
                                          shuffle=None,
                                          num_workers=20,
                                          pin_memory=args.cuda,
                                          sampler=None)

# build model
if args.arc == 'LeNet':
    model = LeNet()
elif args.arc.startswith('VGG'):
    model = VGG(args.arc)
else:
    model = LeNet()

if args.cuda:
    print('Using CUDA with {0} GPUs'.format(torch.cuda.device_count()))
    model = torch.nn.DataParallel(model, device_ids=[0]).cuda()

# define optimizer
if args.optimizer.lower() == 'adam':
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
elif args.optimizer.lower() == 'sgd':
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr)
Example #26
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    batch_size = 16
    epochs = 200

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "val":
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(
        image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))

    # create model
    net = LeNet(num_classes=5)

    # load pretrained weights
    # download url: https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth
    # download url: https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth
    # model_weight_path = "weights/LeNet_pretrained.pth"
    # assert os.path.exists(model_weight_path), "file {} does not exist.".format(model_weight_path)
    # pre_weights = torch.load(model_weight_path, map_location=device)

    # delete classifier weights
    # pre_dict = {k: v for k, v in pre_weights.items() if net.state_dict()[k].numel() == v.numel()}
    # missing_keys, unexpected_keys = net.load_state_dict(pre_dict, strict=False)
    #
    # # freeze features weights
    # for param in net.conv_stem.parameters():
    #     param.requires_grad = False
    #
    # for param in net.bn1.parameters():
    #     param.requires_grad = False
    #
    # for param in net.act1.parameters():
    #     param.requires_grad = False
    #
    # for param in net.blocks.parameters():
    #     param.requires_grad = False

    net.to(device)

    # define loss function
    loss_function = nn.CrossEntropyLoss()

    # construct an optimizer
    params = [p for p in net.parameters() if p.requires_grad]
    optimizer = optim.Adam(params, lr=0.0001)

    best_acc = 0.0
    save_path = 'weights/lenet.pth'
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            logits = net(images.to(device))
            loss = loss_function(logits, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                # loss = loss_function(outputs, test_labels)
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

                val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1, epochs)
        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')
Example #27
def train_and_test(flags,
                   corruption_level=0,
                   gold_fraction=0.5,
                   get_C=uniform_mix_C):
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)

    C = get_C(corruption_level)

    gold, silver = prepare_data(C, gold_fraction)

    print("Gold shape = {}, Silver shape = {}".format(gold.images.shape,
                                                      silver.images.shape))

    # TODO : test on whole set
    test_x = torch.from_numpy(mnist.test.images[:500].reshape([-1, 1, 28, 28]))
    test_y = torch.from_numpy(mnist.test.labels[:500]).type(torch.LongTensor)
    print("Test shape = {}".format(test_x.shape))

    model = LeNet()
    optimizer = torch.optim.Adam([p for p in model.parameters()], lr=0.001)

    for step in range(flags.num_steps):
        x, y = silver.next_batch(flags.batch_size)
        y, y_true = np.array([l[0] for l in y]), np.array([l[1] for l in y])
        x_val, y_val = gold.next_batch(min(flags.batch_size, flags.nval))

        x, y = torch.from_numpy(x.reshape(
            [-1, 1, 28, 28])), torch.from_numpy(y).type(torch.LongTensor)
        x_val, y_val = torch.from_numpy(x_val.reshape(
            [-1, 1, 28, 28])), torch.from_numpy(y_val).type(torch.LongTensor)

        # forward
        if flags.method == "l2w":
            ex_wts = reweight_autodiff(model, x, y, x_val, y_val)
            logits, loss = model.loss(x, y, ex_wts)

            if step % dbg_steps == 0:
                tbrd.log_histogram("ex_wts", ex_wts, step=step)
                tbrd.log_value("More_than_0.01",
                               sum([x > 0.01 for x in ex_wts]),
                               step=step)
                tbrd.log_value("More_than_0.05",
                               sum([x > 0.05 for x in ex_wts]),
                               step=step)
                tbrd.log_value("More_than_0.1",
                               sum([x > 0.1 for x in ex_wts]),
                               step=step)

                mean_on_clean_labels = np.mean(
                    [ex_wts[i] for i in range(len(y)) if y[i] == y_true[i]])
                mean_on_dirty_labels = np.mean(
                    [ex_wts[i] for i in range(len(y)) if y[i] != y_true[i]])
                tbrd.log_value("mean_on_clean_labels",
                               mean_on_clean_labels,
                               step=step)
                tbrd.log_value("mean_on_dirty_labels",
                               mean_on_dirty_labels,
                               step=step)
        else:
            logits, loss = model.loss(x, y)

        print("Loss = {}".format(loss))

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        tbrd.log_value("loss", loss, step=step)

        if step % dbg_steps == 0:
            model.eval()

            pred = torch.max(model.forward(test_x), 1)[1]
            test_acc = torch.sum(torch.eq(pred, test_y)).item() / float(
                test_y.shape[0])
            model.train()

            print("Test acc = {}.".format(test_acc))
            tbrd.log_value("test_acc", test_acc, step=step)
Example #28
                    help='Learning rate decay',
                    type=float)
parser.add_argument('--batch_size', default=80, help='Batch Size', type=int)
parser.add_argument('--data_augmentation',
                    default=True,
                    help='Using data augmentation',
                    type=int)
parser.add_argument('--grayscale',
                    default=True,
                    help='Convert images to grayscale',
                    type=int)
parser.add_argument('--keep_prob',
                    default=0.8,
                    help='Keep probability for dropout',
                    type=float)
config = parser.parse_args()
trainBG = BatchGenerator(X_train, y_train, config.batch_size, config.grayscale,
                         'train')
validBG = BatchGenerator(X_valid, y_valid, config.batch_size, config.grayscale)
testBG = BatchGenerator(X_valid, y_valid, config.batch_size, config.grayscale)
config.decay_steps = trainBG.num_batches * config.num_epoch
label_dict = {}
with open('signnames.csv') as f:
    reader = csv.DictReader(f)
    for row in reader:
        label_dict[row['ClassId']] = row['SignName']
#vgg = VGGsimple(config, label_dict)
#vgg.train(trainBG, validBG, config.num_epoch)
lenet = LeNet(config, label_dict)
lenet.train(trainBG, validBG, config.num_epoch)
Example #29
# define the dataloaders
cifar_train = dataset.Cifar10Dataset('./cifar10/train', transform=transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
cifar_test = dataset.Cifar10Dataset('./cifar10/test', transform=transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
cifar_train_loader = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)
cifar_test_loader = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)


net = LeNet()
if MULTI_GPU:
    net = nn.DataParallel(net, device_ids=device_ids)
net.to(device)
criteon = nn.CrossEntropyLoss()

optimizer=optim.Adam(net.parameters(), lr=1e-3)
scheduler = StepLR(optimizer, step_size=100, gamma=0.1)
if MULTI_GPU:
    optimizer = nn.DataParallel(optimizer, device_ids=device_ids)
    scheduler = nn.DataParallel(scheduler, device_ids=device_ids)

# print(net)
for epoch in range(epoch_num):
    for batchidx, (label, img) in enumerate(cifar_train_loader):
        net.train()
Example #30
# 10,000 validation images
val_set = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=False,
                                       transform=transform)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=5000,
                                         shuffle=False,
                                         num_workers=0)
val_data_iter = iter(val_loader)
val_image, val_label = next(val_data_iter)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

net = LeNet()
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

for epoch in range(50):  # loop over the dataset multiple times

    running_loss = 0.0
    for step, data in enumerate(train_loader, start=0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = loss_function(outputs, labels)
Example #31
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
from chainer import serializers
from random import randint
import cv2

from model import LeNet



point = 1
predict_num = 1
model = LeNet(predict_num)
img_size = 32
"""
if model_name == 'AlexNet':
    from cnn import AlexNet
    model = AlexNet(predict_num)
    img_size = 227
"""

gpu = -1
if gpu >= 0:
    cuda.get_device(gpu).use()
    model.to_gpu()
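# xp is the array backend: NumPy when running on the CPU, CuPy on the GPU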
xp = np if gpu < 0 else cuda.cupy
optimizer = optimizers.Adam()
optimizer.setup(model)