コード例 #1
0
ファイル: train.py プロジェクト: liviudan/lifelong-learning
def init_model_and_optimizer(use_attention_improvement=False):
    """Build a ``Net`` (optionally with the attention improvement) plus its optimizer.

    The model is moved to the GPU when ``args.cuda`` is set; the Adam
    optimizer uses the learning rate from ``args.lr``.

    Returns:
        (model, optimizer) tuple.
    """
    net = Net(use_attention_improvement)
    if args.cuda:
        net.cuda()
    adam = optim.Adam(net.parameters(), lr=args.lr)

    return net, adam
コード例 #2
0
def test():
    """Load the trained model and classify one randomly chosen test image."""
    # Restore the trained network on the GPU in evaluation mode (no dropout).
    model = Net()
    model.cuda()
    model.load_state_dict(torch.load(model_file))
    model.eval()

    # Wrap the test split.
    datafile = DVCD('test', dataset_dir)
    print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

    # Draw one sample at random and shape it into a 4-D batch of one,
    # since the network expects (batch, channels, H, W) input.
    index = np.random.randint(0, datafile.data_size, 1)[0]
    img = datafile.__getitem__(index)
    img = img.unsqueeze(0)
    img = Variable(img).cuda()

    # Forward pass: column 0 holds the cat score, column 1 the dog score.
    out = model(img)
    print(out)
    if out[0, 0] > out[0, 1]:
        print('the image is a cat')
    else:
        print('the image is a dog')

    # Display the image that was just classified.
    img = Image.open(datafile.list_img[index])
    plt.figure('image')
    plt.imshow(img)
    plt.show()
コード例 #3
0
ファイル: test.py プロジェクト: xxxgp/DogsVsCats
def test():
    """Classify N random images from ``dataset_dir`` and plot each with its scores."""
    # Trained network, wrapped for multi-GPU use, in evaluation mode.
    model = Net()
    model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_file))
    model.eval()

    # Sample N files; keep both the PIL image (for display) and the
    # transformed tensor (for inference).
    files = random.sample(os.listdir(dataset_dir), N)
    imgs = []
    imgs_data = []
    for name in files:
        image = Image.open(dataset_dir + name)
        imgs.append(image)
        imgs_data.append(getdata.dataTransform(image))
    imgs_data = torch.stack(imgs_data)  # merge the tensor list into one 4-D batch

    # Batched forward pass -> per-class probabilities as a numpy array.
    out = model(imgs_data)
    out = F.softmax(out, dim=1)
    out = out.data.cpu().numpy()

    # One figure per image, titled with the winning class first.
    for idx in range(N):
        plt.figure()
        if out[idx, 0] > out[idx, 1]:
            plt.suptitle('cat:{:.1%},dog:{:.1%}'.format(out[idx, 0], out[idx, 1]))
        else:
            plt.suptitle('dog:{:.1%},cat:{:.1%}'.format(out[idx, 1], out[idx, 0]))
        plt.imshow(imgs[idx])
    plt.show()
コード例 #4
0
 def test():

     model = Net()                                        #네트워크실제화
     model.cuda()                                         # GPU로 계산
     model.load_state_dict(torch.load(model_file))       # 학습된 모델 로딩
     model.eval()                                        # eval모델

     datafile = DVCD('test', dataset_dir)                # dataset 실례화
     print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

     index = np.random.randint(0, datafile.data_size, 1)[0]      # random수로 image를 임의 선택함
     img = datafile.__getitem__(index)                           # image 하다 받다
     img = img.unsqueeze(0)                                      
     img = Variable(img).cuda()                                  # Data를 PyTorch의 Variable노드에서 놓고 ,또는 GPU에서 들어가고 시작점을 담임힘.
     out = model(img)                                            
     out = F.softmax(out, dim=1)                                        # SoftMax으로 2个개 output값을 [0.0, 1.0]을 시키다,합이1이다.
     print(out)                      # output는 개/고양이의 확률
     if out[0, 0] > out[0, 1]:                   # 개<고양이
         print('the image is a cat')
     else:                                       # 개>고양이
         print('the image is a dog')

     img = Image.open(datafile.list_img[index])      # text image open
     plt.figure('image')                             # matplotlib로  image show
     plt.imshow(img)
     plt.show()
コード例 #5
0
def define_model():
    """Build the complete CNN, placing it on the GPU when CUDA is available."""
    print("Defining the Network Architecture...\n")
    network = Net()

    # Convert the module's tensors to CUDA only when a GPU is present.
    if train_on_gpu:
        network.cuda()
    return network
コード例 #6
0
def test():
    """Evaluate the trained model on every image in ``dataset_dir``.

    Images whose numeric file name is <= 900 are treated as the positive
    class (label 1); the model predicts positive when its class-0 probability
    exceeds its class-1 probability.  Prints precision, recall and F1.

    Fixes over the original: the two label branches duplicated byte-identical
    prediction logic (it is computed once here), and the cryptic
    ``out1/out2/out3/a/b`` names are replaced.
    """
    model_file = model_path + 'model.pth'
    model = Net()
    model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_file))
    model.eval()

    files = os.listdir(dataset_dir)

    # Run each image through the network one at a time and keep row 0 of
    # each single-image softmax output.
    probs = []
    for name in files:
        img = Image.open(dataset_dir + name)
        batch = torch.stack([getdata.dataTransform(img)])  # 4-D batch of one
        out = F.softmax(model(batch), dim=1)
        probs.append(out.data.cpu().numpy()[0])
    probs = np.array(probs)

    y = []  # ground-truth labels derived from the file name
    x = []  # model predictions
    for idx, name in enumerate(files):
        stem, _ = os.path.splitext(name)
        y.append(1 if int(stem) <= 900 else 0)
        x.append(1 if probs[idx, 0] > probs[idx, 1] else 0)

    p = metrics.precision_score(y, x)
    r = metrics.recall_score(y, x)
    f1 = metrics.f1_score(y, x)

    print('-------------test-----------------')
    print('precision: %f' % p)
    print('recall: %f' % r)
    print('f1_score: %f' % f1)
コード例 #7
0
ファイル: train.py プロジェクト: qijiayi-dev/OwlEye
def train():
    """Train ``Net``, checkpointing and validating after every epoch."""
    datafile = DATA('train', dataset_dir)
    dataloader = DataLoader(datafile,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=workers,
                            drop_last=True)

    print('-------------train-----------------')
    print('Length of train set is {0}'.format(len(datafile)))

    # GPU model wrapped for multi-GPU training, in training mode.
    model = Net()
    model = model.cuda()
    model = nn.DataParallel(model)
    model.train()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()

    cnt = 0    # batches processed so far (for progress reporting)
    count = 0  # checkpoint index

    for epoch in range(nepoch):
        for img, label in dataloader:
            img = Variable(img).cuda()
            label = Variable(label).cuda()
            out = model(img)
            loss = criterion(out, label.squeeze())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            cnt += 1
            print('Epoch:{0},Frame:{1}, train_loss {2}'.format(
                epoch, cnt * batch_size, loss / batch_size))

        # Persist the weights and validate once per epoch.
        torch.save(model.state_dict(),
                   '{0}/{1}model.pth'.format(model_cp, count))
        val(count)
        count += 1
コード例 #8
0
def train():
    """Train the cat/dog classifier for ``nepoch`` epochs and save the weights."""
    # DataLoader wraps the data set to provide shuffling, multi-worker
    # reads and fixed-size batches (drop_last discards the short tail batch).
    datafile = DVCD('train', dataset_dir)
    dataloader = DataLoader(datafile,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=workers,
                            drop_last=True)

    print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

    # Network on the GPU, wrapped for multi-GPU, in training mode
    # (unlike .eval(), training mode keeps dropout active).
    model = Net()
    model = model.cuda()
    model = nn.DataParallel(model)
    model.train()

    # Adam optimizer and cross-entropy loss (smaller when the output
    # distribution is closer to the target label).
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()

    cnt = 0  # number of batches processed
    for epoch in range(nepoch):
        for img, label in dataloader:
            # Move the batch to the GPU as the start of the compute graph.
            img = Variable(img).cuda()
            label = Variable(label).cuda()

            out = model(img)                        # forward pass: class scores
            loss = criterion(out, label.squeeze())  # target must be a 1-D tensor
            loss.backward()                         # back-propagate gradients
            optimizer.step()                        # apply the parameter update
            optimizer.zero_grad()                   # gradients accumulate unless cleared
            cnt += 1

            print('Epoch:{0},Frame:{1}, train_loss {2}'.format(
                epoch, cnt * batch_size, loss / batch_size))

    # Persist the trained parameters once all epochs finish.
    torch.save(model.state_dict(), '{0}/model.pth'.format(model_cp))
コード例 #9
0
    ])
# FER2013 facial-expression splits sharing one transform pipeline.
# (The commented-out MNIST lines are the previous data source.)
trainset = FER2013Dataset('/beegfs/jn1664/fer2013', train=True, transform=data_transform)
testset = FER2013Dataset('/beegfs/jn1664/fer2013', train=False, transform=data_transform)
#trainset = dsets.MNIST(root='.', train=True, download=True, transform=data_transform)
#testset = dsets.MNIST(root='.', train=False, download=True, transform=data_transform)

# Single-process loaders; only the training loader shuffles.
train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
val_loader  = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)



# 7-class network (presumably the 7 FER2013 emotion classes — TODO confirm);
# moved to the GPU only when at least one GPU was requested on the CLI.
n_gpu = args.GPU
print(n_gpu)
model = Net(7)
if n_gpu > 0:
    model.cuda()

# SGD with momentum from the CLI arguments (Adam kept as an alternative).
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
#optimizer = optim.Adam(model.parameters(), lr=args.lr)
def train(epoch):
    """Run one training epoch of ``model`` over ``train_loader``.

    Fix over the original: the body mixed tab-indented lines with
    space-indented lines, which is a TabError under Python 3.  The logic is
    unchanged; the block is re-indented with spaces only.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Move the batch to the GPU only when at least one GPU was requested.
        if n_gpu > 0:
            data, target = Variable(data.cuda()), Variable(target.cuda())
        elif n_gpu == 0:
            data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
コード例 #10
0
from torch.autograd import Variable
from network import Net, MobileNet  # definition of the (custom) network architecture

print('Setting up network...')

# Build the network from configuration flags defined earlier in the file.
net = Net(input_shape=target_shape,
          vgg16_basemodel=use_vgg16_basemodel,
          batch_normalization=use_batch_norm,
          dropout=use_dropout)
#net = MobileNet(input_shape=target_shape, nb_classes=1)
print(net)

if HAS_CUDA:
    # All the tensors in the module are converted to CUDA on the chosen GPU.
    net.cuda(gpu_id)

#%% ---------------------------------------------------------------------------
# Training.

# Huber-style regression loss plus SGD with momentum and weight decay
# (Adam kept as a commented-out alternative).
criterion = torch.nn.SmoothL1Loss().cuda(gpu_id)
optimizer = torch.optim.SGD(net.parameters(),
                            lr=0.003,
                            momentum=0.9,
                            weight_decay=0.0005)
#optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0005)

def lr_scheduler(optimizer, lr_decay=0.001, epoch=None, step=1):
    """Decay learning rate by a factor of lr_decay every step epochs.
    """
コード例 #11
0
ファイル: train.py プロジェクト: yl305237731/CBIR-pytorch
# MNIST digits as plain tensors, shuffled into training batches.
train_dataset = datasets.MNIST(root='./mnist/',
                               train=True,
                               transform=transforms.ToTensor())
train_loader = Data.DataLoader(dataset=train_dataset,
                               batch_size=BATCH_SIZE,
                               shuffle=True)

gpu = torch.cuda.is_available()
model = Net()
num_epochs = 20
epoch_size = train_dataset.__len__() // BATCH_SIZE  # batches per epoch
max_iter = epoch_size * num_epochs
# MSE against the *input* image: the model is trained as an autoencoder
# (labels from the loader are never used below).
loss = torch.nn.MSELoss()
if gpu:
    model = model.cuda()
    loss = loss.cuda()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0001)

iteration = 0  # NOTE(review): never incremented in the visible code
for epoch in range(num_epochs):
    model.train()
    for i, (img, label) in enumerate(train_loader):
        optimizer.zero_grad()
        load_t0 = time.time()  # NOTE(review): timed but never reported here
        if gpu:
            img = img.cuda()
        out = model(img)
        loss_v = loss(out, img)  # reconstruction loss
        loss_v.backward()
        optimizer.step()
コード例 #12
0
                        sampler=sampler,
                        shuffle=False,
                        num_workers=1)

    #    dataiter = iter(loader)
    #    images, labels = dataiter.next()
    #    print (images)
    #    images=tensor_to_img(images)
    #    print (labels)
    #    print (images)

    net = Net(batch_size)
    if load_checkpoint:
        net.load_state_dict(torch.load(SAVE_PATH))

    net.cuda()

    thld = np.arange(0, 1, 0.05)
    accu_tp = []
    accu_fp = []
    accu_iou = []
    for epoch in range(1):
        for i, data in enumerate(loader, 0):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.float() / 256, labels.float()
            #
            #                # wrap them in Variable
            #
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
            #
コード例 #13
0
ファイル: trainprocess.py プロジェクト: jmjkx/lzy
writer = SummaryWriter(log_dir)  # TensorBoard logger

# --------------------- Build the network --------------------------
cnn = Net()  # create the CNN
print("Total number of paramerters in networks is {}  ".format(sum(x.numel() for x in cnn.parameters())))
cnn = cnn.double()  # run the model in float64 to match the double inputs below

# -------------------- Loss function and optimizer ----------------------
optimizer = optim.Adam(cnn.parameters(), lr=0.001)  # lr (default: 1e-3)
criterion = nn.CrossEntropyLoss()  # loss function
# Learning-rate decay: halve the LR every EPOCH/2 scheduler steps.
# NOTE(review): step_size is a float here (EPOCH / 2); StepLR expects an
# integer step count — confirm EPOCH is even or use EPOCH // 2.
scheduler = torch.optim.lr_scheduler.StepLR(
    optimizer, step_size=EPOCH / 2, gamma=0.5)

# -------------------- Training ------------------------------
# Move the model to the GPU.
cnn = cnn.cuda()
print('=========STAGE1==============')
for epoch in range(1):
    loss_sigma = 0.0  # 记录一个epoch的loss之和
    correct = 0.0
    total = 0.0
    scheduler.step()  # 更新学习率

    for batch_idx, data in enumerate(train_loader):
        # 获取图片和标签
        inputs, labels = data
        inputs = inputs.double()
        labels = labels.double()
        inputs, labels = inputs.cuda(), labels.cuda()
        optimizer.zero_grad()  # 清空梯度
        cnn = cnn.train()
コード例 #14
0
def train(n_epochs=n_epochs,
          train_loader=train_loader,
          valid_loader=valid_loader,
          save_location_path=save_location_path):
    """Train ``Net`` for keypoint regression, validating after every epoch.

    Fix over the original: it called ``model.eval()`` for validation and
    never switched back, so every epoch after the first trained with the
    network frozen in evaluation mode.  ``model.train()`` is now re-entered
    at the start of each epoch, and validation runs under ``torch.no_grad()``
    so no gradient graph is built for it.
    """
    train_on_gpu = torch.cuda.is_available()

    def init_weights(m):
        # Xavier-initialise every linear layer.
        if isinstance(m, nn.Linear):
            I.xavier_uniform_(m.weight)

    model = Net()
    model.apply(init_weights)

    if train_on_gpu:
        model.cuda()

    criterion = nn.MSELoss()
    optimizer = optim.Adam(params=model.parameters(), lr=0.001)

    # NOTE(review): tracked nowhere below — kept for compatibility.
    valid_loss_min = np.Inf

    def _prepare(data):
        """Move one batch to the right device/dtype and flatten the keypoints."""
        images = data['image']
        key_pts = data['keypoints']
        if train_on_gpu:
            images, key_pts = images.cuda(), key_pts.cuda()
        # Flatten keypoints & convert to FloatTensor for the regression loss.
        key_pts = key_pts.view(key_pts.size(0), -1)
        if train_on_gpu:
            key_pts = key_pts.type(torch.cuda.FloatTensor)
            images = images.type(torch.cuda.FloatTensor)
        else:
            key_pts = key_pts.type(torch.FloatTensor)
            images = images.type(torch.FloatTensor)
        return images, key_pts

    for epoch in range(1, n_epochs + 1):
        # Keep track of training and validation loss.
        train_loss = 0.0
        valid_loss = 0.0

        model.train()  # bug fix: re-enable training mode every epoch
        for data in train_loader:
            images, key_pts = _prepare(data)
            optimizer.zero_grad()              # clear the gradient
            output = model(images)             # forward
            loss = criterion(output, key_pts)  # compute the loss
            loss.backward()                    # compute the gradient
            optimizer.step()                   # apply the update
            train_loss += loss.item() * images.size(0)

        # Validation pass — no gradients needed.
        model.eval()
        with torch.no_grad():
            for data in valid_loader:
                images, key_pts = _prepare(data)
                output = model(images)
                loss = criterion(output, key_pts)
                valid_loss += loss.item() * images.size(0)

        # NOTE(review): these divide by the number of *batches* while the
        # sums are weighted by batch size; kept as-is to preserve the
        # reported scale the dashboard expects.
        train_loss = train_loss / len(train_loader)
        valid_loss = valid_loss / len(valid_loader)

        print(
            f"epoch: {epoch} \t trainLoss: {train_loss} \t valLoss: {valid_loss}"
        )

        eviz.send_data(current_epoch=epoch,
                       current_train_loss=train_loss,
                       current_val_loss=valid_loss)
コード例 #15
0
ファイル: test_mnist.py プロジェクト: rrawther/pytorch-study
            data, target = data.cuda(args.device_id), target.cuda(
                args.device_id)
        data, target = Variable(data), Variable(target)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False)
        pred = output.data.max(1)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(data_loader.dataset)
    print("\n Test set: Average loss: {:.4f}, Accuracy: {}/{} ({}%)\n".format(
        test_loss, correct, len(data_loader.dataset),
        100.0 * float(correct) / len(data_loader.dataset)))


if __name__ == '__main__':
    # Parse CLI arguments, then decide on GPU use and seed the RNGs.
    args = set_args()
    args.cuda = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    model = Net()
    if args.cuda:
        # Seed CUDA as well, place the model on the chosen device, and let
        # cudnn benchmark-select the fastest kernels for fixed input shapes.
        torch.cuda.manual_seed(args.seed)
        model.cuda(args.device_id)
        import torch.backends.cudnn as cudnn
        cudnn.benchmark = True

    # Build the test data loader.
    data_loader = test_loader(args)

    # Run the evaluation pass.
    test(data_loader, model, args)
コード例 #16
0
    transforms.RandomGrayscale(),
    # transforms.RandomAffine((-30, 30)),
    # transforms.RandomRotation((-60, 60)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Training images from the configured folder, shuffled into batches.
train_set = torchvision.datasets.ImageFolder(paras.train_image,
                                             transform=transform)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=batch_size,
                                           shuffle=True)

# Network, moved to the GPU when available.
net = Net()
if USE_GPU:
    net = net.cuda()

# Cross-entropy classification loss.
criterion = nn.CrossEntropyLoss()

# SGD optimizer (disabled; presumably created per-epoch below — TODO confirm).
# optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)

# Training bookkeeping.
loss_all = []  # loss history
l1 = 0
l2 = 0
for e in range(epoch):

    if (e < 3):
        lr = paras.lr
コード例 #17
0
# NOTE(review): the path says 'test' but is passed to a 'train' data set
# below — confirm this is intentional.
dataset_dir = './data/test/'             # data set path
model_cp = './model/model.pth'           # model checkpoint location
workers = 10                             # number of DataLoader worker processes
batch_size = 20                          # batch size
lr = 0.0001                              # learning rate


def train():
    """Train the network with a single pass over the training data.

    Fix over the original: the body used inconsistent 6/7/8-space
    indentation, which is an IndentationError; the statements are
    re-indented here with their logic unchanged and the Korean comments
    translated.  NOTE(review): there is no epoch loop and ``cnt`` is never
    incremented — the snippet may have been truncated at the source.
    """
    datafile = DVCD('train', dataset_dir)
    dataloader = DataLoader(datafile, batch_size=batch_size, shuffle=True, num_workers=workers)

    print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

    model = Net()           # instantiate the network
    model = model.cuda()    # compute on the GPU
    model.train()           # training mode

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)  # Adam optimizer
    criterion = torch.nn.CrossEntropyLoss()                  # cross-entropy loss

    cnt = 0

    for img, label in dataloader:
        img, label = Variable(img).cuda(), Variable(label).cuda()
        out = model(img)
        loss = criterion(out, label.squeeze())
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
コード例 #18
0
ファイル: test.py プロジェクト: olnayoung/denoising
class testNetwork():
    """Evaluation harness: loads a trained ``Net`` and runs it on grid images."""

    def __init__(self):
        # Class count and batch size from the global CLI args.
        self.class_num = args.class_num
        self.batch_num = args.batch_num

        # Test inputs (BERT grids) and labels (label grids) under the CORD layout.
        self.test_input_path = os.path.join(args.test_path, 'CORD', 'test',
                                            'BERTgrid')
        self.test_label_path = os.path.join(args.test_path, 'CORD', 'test',
                                            'LABELgrid')
        weight_path = os.path.join(args.test_path, 'params', args.weight)

        # Trained model on the GPU with its saved weights restored.
        self.model = Net()
        self.model.cuda()
        self.model.load_state_dict(torch.load(weight_path))
        # NOTE(review): self.criterion is only defined for 'CrossEntropy';
        # any other loss name leaves it unset and val_epoch would raise.
        if args.lossname == 'CrossEntropy':
            self.criterion = nn.CrossEntropyLoss()

    def list_files(self, in_path):
        """Return the sorted .png file names found anywhere under *in_path*."""
        img_files = []
        for (dirpath, dirnames, filenames) in os.walk(in_path):
            for file in filenames:
                filename, ext = os.path.splitext(file)
                ext = str.lower(ext)
                if ext == '.png':
                    img_files.append(file)
                    # img_files.append(os.path.join(dirpath, file))
        img_files.sort()
        return img_files

    def load_data(self, img_names, label_names):
        """Read image/label pairs and cut them into b_size-square patches.

        NOTE(review): ``self.b_size`` is never assigned in this class —
        confirm it is set elsewhere.  Also, ``images``/``labels`` are
        re-created inside the per-file loop, so only the LAST file's patches
        survive to the return; the initialisation probably belongs before
        the loop.
        """
        assert len(img_names) == len(label_names)

        for idx in range(len(img_names)):
            image = cv2.imread(img_names[idx])
            label = cv2.imread(label_names[idx])

            # Zero patch acts as the seed row for np.concatenate below.
            images = np.zeros((1, self.b_size, self.b_size, 3))
            labels = np.zeros((1, self.b_size, self.b_size, 3))

            image = np.expand_dims(image, axis=0)
            label = np.expand_dims(label, axis=0)

            # Tile the image into b_size x b_size windows, clamping the last
            # window of each row/column to the image border.
            _, hei, wid, _ = image.shape
            for h in range(0, hei, self.b_size):
                for w in range(0, wid, self.b_size):
                    if h + self.b_size < hei:
                        start_h, end_h = h, h + self.b_size
                    else:
                        start_h, end_h = hei - self.b_size - 1, hei - 1

                    if w + self.b_size < wid:
                        start_w, end_w = w, w + self.b_size
                    else:
                        start_w, end_w = wid - self.b_size - 1, wid - 1

                    temp_image = image[:, start_h:end_h, start_w:end_w, :]
                    temp_label = label[:, start_h:end_h, start_w:end_w, :]

                    images = np.concatenate((images, temp_image), axis=0)
                    labels = np.concatenate((labels, temp_label), axis=0)

        return images, labels

    def val_epoch(self, input_lists):
        """Compute an average loss over the batched *input_lists*.

        NOTE(review): ``self.optimizer``, ``self.input_path`` and
        ``self.label_path`` are never assigned in this class, and the
        ``return`` sits inside the batch loop so only the FIRST batch is
        ever evaluated — both look like bugs to confirm against the caller.
        """
        self.model.eval()

        losses = 0

        for batch in range(int(len(input_lists) / self.batch_num)):
            self.optimizer.zero_grad()
            input_list, label_list = [], []

            # Collect the file paths belonging to this batch.
            for num in range(self.batch_num):
                idx = batch * self.batch_num + num
                filename = input_lists[idx]

                input_list.append(self.input_path + '/' + filename)
                label_list.append(self.label_path + '/' + filename)

            train_input, train_label = self.load_data(input_list, label_list)

            # NHWC numpy arrays -> NCHW float tensors on the GPU.
            input_tensor = torch.tensor(train_input, dtype=torch.float).cuda()
            input_tensor = input_tensor.permute(0, 3, 1, 2)
            label_tensor = torch.tensor(train_label, dtype=torch.float).cuda()
            label_tensor = label_tensor.permute(0, 3, 1, 2)

            output = self.model(input_tensor)
            loss = self.criterion(output, label_tensor)
            losses += loss.item()

            return losses / self.batch_num

    # def testMany(self):
    #     test_lists = self.list_files(self.test_input_path)
    #     val_loss, val_acc, val_mic, val_mac = self.val_epoch(test_lists)

    #     print('\tVal Loss: %.3f | Val Acc: %.2f%% | mic: %.2f%% | mac: %.2f%%' % (val_loss, val_acc*100, val_mic*100, val_mac*100))

    def testOne(self, img_path):
        """Run the model on a single image file and return its HWC numpy output."""
        self.model.eval()

        # Single image -> 1-element NCHW float batch on the GPU.
        input = cv2.imread(img_path)
        input_tensor = torch.tensor(input, dtype=torch.float).cuda()
        input_tensor = input_tensor.unsqueeze(0)
        input_tensor = input_tensor.permute(0, 3, 1, 2)

        # Forward pass, then back to HWC numpy on the CPU.
        output = self.model(input_tensor)
        output = output.permute(0, 2, 3, 1)
        output = output.squeeze(0)
        output = output.cpu().detach().numpy()

        return output
コード例 #19
0
def main():
    """Train a Siamese network on FashionMNIST pairs, then plot losses and embeddings."""
    BATCH_SIZE = 100
    LR = 1e-3
    EPOCHS = 15

    print('Number of epochs: ', EPOCHS)
    print('Learning Rate: ', LR)
    print('Batch size: ', BATCH_SIZE)

    # Shared tensor/normalisation pipeline for both splits.
    transforms_ = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0, ), (1, ))])

    # Siamese-pair wrappers around the two FashionMNIST splits.
    dataset_train = SiameseDataset(
        datasets.FashionMNIST('../data',
                              train=True,
                              download=True,
                              transform=transforms_))
    dataset_test = SiameseDataset(
        datasets.FashionMNIST('../data',
                              train=False,
                              download=True,
                              transform=transforms_))

    sTrainDataLoader = DataLoader(dataset_train,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=6)
    sTestDataLoader = DataLoader(dataset_test,
                                 batch_size=BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=6)

    # Network on the GPU, pairwise loss, Adam, and LR reduction on plateau.
    pairwise_loss = ParwiseLoss()
    net = Net()
    net.cuda()
    optimizer = optim.Adam(net.parameters(), lr=LR)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     factor=0.1,
                                                     patience=1,
                                                     verbose=True)

    train_losses = []
    test_losses = []
    for epoch in range(EPOCHS):
        print('Epoch:{} '.format(epoch))
        epoch_train = train(net, optimizer, sTrainDataLoader, pairwise_loss)
        epoch_test = test(net, sTestDataLoader, pairwise_loss)
        scheduler.step(epoch_test)
        train_losses.append(epoch_train)
        test_losses.append(epoch_test)

    # Embed the entire test set in one giant batch for visualisation.
    sTestDataLoader2 = DataLoader(dataset_test,
                                  batch_size=10000,
                                  shuffle=False,
                                  num_workers=6)
    emb = getembeddings(net, sTestDataLoader2)

    plotLoss(EPOCHS, train_losses, test_losses)
    plot2D(emb)