コード例 #1
0
    def __init__(self, args, train_loader, test_loader, writer=None):
        """Set up training state, output directories and the two denoising networks.

        Args:
            args: configuration namespace (model, device, resume, dir_save, ...).
            train_loader / test_loader: data loaders used during training/eval.
            writer: optional summary writer for logging.
        """
        self.args = args
        self.model = args.model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.device = args.device
        self.recon_kernel_size = self.args.recon_kernel_size
        self.eps = 0.00316  # small epsilon constant
        # Resume from the saved step counter, otherwise start counting at 1.
        self.global_step = self.args.global_step if self.args.resume else 1
        self.log_dir = os.path.join('exp', self.args.dir_save)
        self.model_dir = os.path.join(self.log_dir, 'model', self.args.model)
        self.print_freq = self.args.print_freq
        self.visualize_freq = self.args.visualize_freq
        self.writer = writer
        self.prediction = self.args.prediction

        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)

        print('Making the Network')
        # Diffuse and specular components are denoised by two separate
        # instances of the selected architecture.
        if args.model == 'KPRCAN':
            net_cls = model.kprcan.KPRCAN
        elif args.model == 'KPRCN':
            net_cls = model.kprcn.KPRCN
        else:
            net_cls = Net
        self.diffuseNet = net_cls(self.args).to(self.device)
        self.specularNet = net_cls(self.args).to(self.device)
コード例 #2
0
 def __init__(self):
     """Initialise DQN agent hyper-parameters, replay memory and networks."""
     self.actions = 4                    # size of the action space
     self.state_space = 37               # size of the observation vector
     self.memory = deque(maxlen=100000)  # experience replay buffer
     # Epsilon-greedy exploration schedule.
     self.randomness = 1
     self.least_randomness = 0.01
     self.randomness_decay = 0.995
     self.gamma = 0.95                   # reward discount factor
     # Online network and the slowly-updated target copy.
     self.network = Net()
     self.qtarget_net = Net()
     self.optimizer = optim.Adam(self.network.parameters(), lr=3e-4)
     self.loss_func = nn.MSELoss()
     self.tau = 1e-3                     # soft-update rate for the target net
コード例 #3
0
ファイル: test.py プロジェクト: xxxgp/DogsVsCats
def test():
    """Run the trained classifier on N random test images and plot each result."""
    # Build the network, move it to the GPU and load the trained weights.
    model = Net()
    model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_file))
    model.eval()  # evaluation mode: disables dropout during inference

    # Sample N random files from the test directory and preprocess them.
    files = random.sample(os.listdir(dataset_dir), N)
    imgs = []       # PIL images, kept for display
    imgs_data = []  # preprocessed tensors
    for name in files:
        img = Image.open(dataset_dir + name)
        imgs.append(img)
        imgs_data.append(getdata.dataTransform(img))  # PIL -> torch tensor
    imgs_data = torch.stack(imgs_data)  # stack into one 4-D batch tensor

    # Forward pass; convert logits to probabilities and pull back to numpy.
    out = F.softmax(model(imgs_data), dim=1)
    out = out.data.cpu().numpy()

    # Show each image titled with its cat/dog probabilities.
    for idx in range(N):
        plt.figure()
        cat_p, dog_p = out[idx, 0], out[idx, 1]
        if cat_p > dog_p:
            plt.suptitle('cat:{:.1%},dog:{:.1%}'.format(cat_p, dog_p))
        else:
            plt.suptitle('dog:{:.1%},cat:{:.1%}'.format(dog_p, cat_p))
        plt.imshow(imgs[idx])
    plt.show()
コード例 #4
0
 def test():
     """Classify one random test image as cat or dog and display it."""
     # Instantiate the network on the GPU and restore trained weights.
     model = Net()
     model.cuda()
     model.load_state_dict(torch.load(model_file))
     model.eval()  # evaluation mode

     datafile = DVCD('test', dataset_dir)  # test-split dataset
     print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

     # Pick one random sample and shape it as a 1-image batch on the GPU.
     index = np.random.randint(0, datafile.data_size, 1)[0]
     img = datafile.__getitem__(index)
     img = Variable(img.unsqueeze(0)).cuda()

     # Softmax turns the two outputs into probabilities that sum to 1.
     out = F.softmax(model(img), dim=1)
     print(out)
     if out[0, 0] > out[0, 1]:
         print('the image is a cat')
     else:
         print('the image is a dog')

     # Re-open the original file and display it with matplotlib.
     img = Image.open(datafile.list_img[index])
     plt.figure('image')
     plt.imshow(img)
     plt.show()
コード例 #5
0
def test():
    """Pick one random test image, predict cat vs dog, and display the image."""
    # Network on the GPU with trained weights, in evaluation mode (no dropout).
    model = Net()
    model.cuda()
    model.load_state_dict(torch.load(model_file))
    model.eval()

    datafile = DVCD('test', dataset_dir)  # instantiate the test dataset
    print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

    # Random index into the dataset.
    index = np.random.randint(0, datafile.data_size, 1)[0]
    # The network expects a 4-D batch tensor, so add a leading batch dimension
    # to the single image before moving it to the GPU.
    img = datafile.__getitem__(index).unsqueeze(0)
    img = Variable(img).cuda()

    # Forward pass; column 0 is the cat score, column 1 the dog score
    # (per the original author's comments).
    out = model(img)
    print(out)
    if out[0, 0] > out[0, 1]:
        print('the image is a cat')
    else:
        print('the image is a dog')

    # Show the original image file with matplotlib.
    img = Image.open(datafile.list_img[index])
    plt.figure('image')
    plt.imshow(img)
    plt.show()
コード例 #6
0
def main():
    """Run image inference with the detector + metric network and dump predictions."""
    # Detectron2-style config: trained detector weights and score threshold.
    cfg = get_cfg()
    cfg.merge_from_file(
        '../output/one_cls_faster_rcnn_R_50_FPN_deconv/config.yaml')
    cfg.MODEL.WEIGHTS = '../output/one_cls_faster_rcnn_R_50_FPN_deconv/model_0044999.pth'
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8

    # Merge both test-set partitions into a single dataset list.
    dataset = (
        get_img_dicts('/tcdata/test_dataset_3w/test_dataset_part1/image')
        + get_img_dicts('/tcdata/test_dataset_3w/test_dataset_part2/image'))
    if DEBUG:
        dataset = dataset[:400]  # small subset for quick debugging

    # Metric-learning network used to embed the detected boxes.
    metric_net = Net(num_classes=29841,
                     feat_dim=512,
                     cos_layer=True,
                     dropout=0.,
                     image_net='resnet50',
                     pretrained=False)
    checkpoint = torch.load('../output/arcface_R_50/best_14.pt')
    metric_net.load_state_dict(checkpoint['model_state_dict'])

    # Run inference and write the predictions to JSON.
    inst_ds = infer_img(cfg, metric_net, dataset, bbox_scale='S')
    with open('../output/inference/pred_img.json', 'w') as f:
        json.dump(inst_ds, f)
コード例 #7
0
    def __init__(self):
        """Prepare an inference wrapper: model location, network and input size."""
        self.model_path = './model'        # directory holding the trained checkpoint
        self.net = Net(is_training=False)  # network built in inference mode
        self.size = 96                     # input image size
コード例 #8
0
ファイル: train.py プロジェクト: qijiayi-dev/OwlEye
def train():
    """Train the classifier for `nepoch` epochs, checkpointing after each epoch."""
    datafile = DATA('train', dataset_dir)
    dataloader = DataLoader(datafile,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=workers,
                            drop_last=True)

    print('-------------train-----------------')
    print('Length of train set is {0}'.format(len(datafile)))

    # Model on the GPU, wrapped for multi-GPU data parallelism.
    model = nn.DataParallel(Net().cuda())
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()

    cnt = 0    # number of batches processed so far
    count = 0  # checkpoint index

    for epoch in range(nepoch):
        for img, label in dataloader:
            img, label = Variable(img).cuda(), Variable(label).cuda()
            loss = criterion(model(img), label.squeeze())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            cnt += 1
            print('Epoch:{0},Frame:{1}, train_loss {2}'.format(
                epoch, cnt * batch_size, loss / batch_size))

        # Save a checkpoint and run validation once per epoch.
        torch.save(model.state_dict(),
                   '{0}/{1}model.pth'.format(model_cp, count))
        val(count)
        count += 1
コード例 #9
0
ファイル: main.py プロジェクト: kevinghst/spaceship_submit
def eval():
    """Evaluate the localizer on 1000 random samples; print the IoU>0.7 rate."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    np.random.seed(seed=46)  # fixed seed for reproducible sampling

    # Restore the trained model (checkpoint saved for CPU) and switch to eval mode.
    model = Net()
    checkpoint = torch.load('model.pth.tar', map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint)
    model = model.to(device)
    model.eval()

    ious = []
    for _ in tqdm(range(1000)):
        img, label = make_data()
        # Wrap the generated image as a (1, 1, ...) batch tensor on the device.
        # NOTE(review): assumes make_data returns a 2-D image — confirm.
        img = torch.from_numpy(np.asarray(img, dtype=np.float32))
        img = img.unsqueeze(0).unsqueeze(0).to(device)
        ious.append(score_iou(label, model.predict(img)))

    ious = np.asarray(ious, dtype="float")
    ious = ious[~np.isnan(ious)]  # remove true negatives (NaN IoU)
    print((ious > 0.7).mean())
コード例 #10
0
def load_gen():
    """Check the save path for the newest generation, then load all available nets.

    Returns:
        (nets, oldest_gen, save_path): an ndarray of ``settings.number_nets``
        networks (freshly constructed when no generation file exists), the
        generation number that was found (0 when none), and the directory
        that was searched.
    """
    save_path = os.path.join(os.getcwd(), "SSwingEnv")
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # File names look like "Generation <n>.pkl"; strip the extension
    # characters and take the highest generation number present.
    try:
        oldest_gen = max(int(x.split(" ")[1].strip(".pkljg"))
                         for x in os.listdir(save_path))
    except ValueError:  # directory empty or no parsable file names
        oldest_gen = 0
    gen_path = os.path.join(save_path,
                            "Generation " + str(oldest_gen) + ".pkl")

    if oldest_gen != 0:
        # The generation file holds several sequentially-pickled nets;
        # read until EOF.
        nets = []
        with open(gen_path, "rb") as file:
            while True:
                try:
                    nets.append(pickle.load(file))
                except EOFError:
                    break
        nets = np.array(nets)
        try:
            # Sample without replacement when enough nets were saved.
            nets = np.random.choice(nets, settings.number_nets, False)
        except ValueError:
            # Fewer saved nets than requested: sample with replacement.
            print("Insufficient saved nets. Taking Random Sample")
            nets = np.random.choice(nets, settings.number_nets, True)
    else:
        nets = np.array([Net() for _ in range(settings.number_nets)])
    return nets, oldest_gen, save_path
コード例 #11
0
ファイル: main.py プロジェクト: wangchuan/TensorFlow-Example
def main():
    """Build the TF graph, then train, test or resume depending on FLAGS.mode."""
    train_data_reader = DataReader(FLAGS, dtype='train')
    test_data_reader = DataReader(FLAGS, dtype='test')

    with tf.Graph().as_default():
        net = Net(FLAGS)

        # Session that grows GPU memory on demand and soft-places ops.
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))

        saver = tf.train.Saver()

        if FLAGS.mode == 'train':
            # Fresh training run.
            do_train.run(FLAGS, sess, net, saver, train_data_reader,
                         test_data_reader)
        else:
            # Restore the latest checkpoint before testing or resuming.
            ckpt = tf.train.get_checkpoint_state(FLAGS.log_path)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model restored...")
            if FLAGS.mode == 'test':
                do_validate.run(sess, net, test_data_reader)
            else:
                do_train.run(FLAGS, sess, net, saver, train_data_reader,
                             test_data_reader)
コード例 #12
0
        def run(self):
            """Worker loop: put the model on this worker's device and serve
            tasks from the input queue until a stop token arrives."""
            model = Net()
            model.to(self.device)

            while True:
                task = self.input_queue.get()  # blocking: wait for a task
                # `_StopToken` on the queue shuts this worker down.
                if isinstance(task, Predictor._StopToken):
                    break

                task_id, x = task  # task layout is defined by the enqueuer

                with torch.no_grad():
                    output = model(x.to(self.device))
                    # Copy off the GPU before sending to another process.
                    output = output.cpu()
                self.output_queue.put((task_id, output))
コード例 #13
0
def get_pred_vid():
    """Run video inference with the detector + metric network.

    Returns:
        The instance predictions produced by `infer_vid`.
    """
    # Detector config: trained weights and detection score threshold.
    cfg = get_cfg()
    cfg.merge_from_file(
        '../output/one_cls_faster_rcnn_R_50_FPN_deconv/config.yaml')
    cfg.MODEL.WEIGHTS = '../output/one_cls_faster_rcnn_R_50_FPN_deconv/model_0044999.pth'
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8

    # Merge both test-set partitions into one dataset list.
    dataset = (
        get_vid_dicts('/tcdata/test_dataset_3w/test_dataset_part1/video')
        + get_vid_dicts('/tcdata/test_dataset_3w/test_dataset_part2/video'))
    if DEBUG:
        dataset = dataset[:400]  # small subset for quick debugging

    # Metric-learning network used to embed detections.
    metric_net = Net(num_classes=29841,
                     feat_dim=512,
                     cos_layer=True,
                     dropout=0.,
                     image_net='resnet50',
                     pretrained=False)
    checkpoint = torch.load('../output/arcface_R_50/best_14.pt')
    metric_net.load_state_dict(checkpoint['model_state_dict'])

    # Only these frame indices of each video are processed.
    return infer_vid(cfg,
                     metric_net,
                     dataset,
                     bbox_scale='S',
                     frames=[40, 120, 200, 280, 360])
コード例 #14
0
def main(args, experiment_id, trial_id):
    """Train and evaluate the model for one NNI trial, reporting accuracy to NNI.

    Args:
        args: dict of hyper-parameters ('no_cuda', 'seed', 'batch_size',
            'hidden_size', 'lr', 'momentum', 'epochs').
        experiment_id: NNI experiment identifier, used in output file names.
        trial_id: NNI trial identifier, used in output file names.
    """
    use_cuda = not args['no_cuda'] and torch.cuda.is_available()
    torch.set_num_threads(4)
    torch.manual_seed(args['seed'])
    device = torch.device("cuda" if use_cuda else "cpu")

    batch_size = args['batch_size']
    hidden_size = args['hidden_size']

    train_loader, test_loader = data_loader(batch_size)

    model = Net(hidden_size=hidden_size).to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args['lr'],
                          momentum=args['momentum'])

    for epoch in range(1, args['epochs'] + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_acc = test(args, model, device, test_loader)

        # report intermediate result
        nni.report_intermediate_result(test_acc)
        logger.debug('test accuracy %g', test_acc)
        logger.debug('Pipe send intermediate result done.')
        # Checkpoint after every epoch; assumes ./model_outputs already exists
        # (torch.save does not create directories) — TODO confirm.
        torch.save(
            model.state_dict(),
            f'{os.path.join(os.getcwd())}/model_outputs/{experiment_id}-{trial_id}-model.pth'
        )

    test_acc = test(args, model, device, test_loader)
    # report final result
    nni.report_final_result(test_acc)
    logger.debug('Final result is %g', test_acc)
    # NOTE(review): `params` is not defined in this function — presumably a
    # module-level global; verify it exists, otherwise this raises NameError.
    output_logger.info(f'{experiment_id}|{trial_id}|{params}|{test_acc:0.6f}')
    logger.debug('Send final result done.')
コード例 #15
0
ファイル: main_function.py プロジェクト: PapaKache/Neural
    def __init__(self, parent=None):
        """
        Constructor
        
        @param parent reference to the parent widget
        @type QWidget
        """
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.setWindowIcon(QIcon('image.ico'))

        # Load the pre-trained weight vectors and reshape them into matrices.
        self.net = Net()
        w1 = np.array(list(self.net.loadWeight('w1-save.csv')))
        w2 = np.array(list(self.net.loadWeight('w2-save.csv')))
        self.weight1 = w1.reshape(64, 365)
        self.weight2 = w2.reshape(365, 49)

        # Slider configuration and progress displays start at zero.
        self.horizontalSlider.setMinimum(0)
        self.horizontalSlider.setMaximum(100)
        self.horizontalSlider.setSingleStep(1)
        self.horizontalSlider.valueChanged.connect(self.valuechange)
        self.progressBarPercent.setValue(0)
        self.progressBarAcc.setValue(0)

        # Worker-state flags and cross-thread signal hookups.
        self.trainning = False
        self.work = True
        self.finsh_signal.connect(self.callFinish)
        self.progress_signal.connect(self.callProgress)
コード例 #16
0
def train():
    """Train a small CNN on MNIST for 10 epochs, report test accuracy, save weights.

    Side effects: downloads MNIST into ./data (if absent) and writes model.pth.
    """
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, ), (0.5, ))])
    trainset = torchvision.datasets.MNIST(root='./data',
                                          train=True,
                                          download=True,
                                          transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=100,
                                              shuffle=True,
                                              num_workers=2)
    testset = torchvision.datasets.MNIST(root='./data',
                                         train=False,
                                         download=True,
                                         transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)

    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=0.0005,
                          momentum=0.99,
                          nesterov=True)

    EPOCHS = 10
    for epoch in range(EPOCHS):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(trainloader, 0):
            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics every 100 mini-batches
            running_loss += loss.item()
            if i % 100 == 99:
                print('[{:d}, {:5d}] loss: {:.3f}'.format(
                    epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
    print('Finished Training')

    # Evaluate on the test set without tracking gradients.
    correct = 0
    total = 0
    with torch.no_grad():
        for (images, labels) in testloader:
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # BUG FIX: str.format does not collapse '%%' (that is %-formatting), so
    # the old format string printed a literal double percent sign.
    print('Accuracy: {:.2f} %'.format(100 * correct / total))

    torch.save(net.state_dict(), 'model.pth')
コード例 #17
0
ファイル: train.py プロジェクト: liviudan/lifelong-learning
def init_model_and_optimizer(use_attention_improvement=False):
    """Create the network and an Adam optimizer over its parameters.

    Returns:
        (model, optimizer) tuple; the model is moved to the GPU when
        args.cuda is set.
    """
    model = Net(use_attention_improvement)
    if args.cuda:
        model.cuda()
    return model, optim.Adam(model.parameters(), lr=args.lr)
コード例 #18
0
def main():
    """Train the perfume CNN, plot loss/accuracy curves, optionally save weights."""
    args = parser()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset = Mydataset(root="./train_data", transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=64,
                                              shuffle=True,
                                              num_workers=2)
    testset = Mydataset(root="./test_data", transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=64,
                                             shuffle=True,
                                             num_workers=2)

    model = Net()
    summary(model, (3, 64, 64))  # print a layer-by-layer model summary
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Per-epoch history for the plots below.
    epochs_seen = []
    train_losses = []
    test_losses = []
    test_accuracies = []
    for epoch in range(1, args.epochs + 1):
        train_loss = train(args, model, trainloader, criterion, optimizer,
                           epoch)
        test_loss, test_accuracy = test(args, model, testloader, criterion)

        epochs_seen.append(epoch)
        train_losses.append(train_loss)
        test_losses.append(test_loss)
        test_accuracies.append(test_accuracy)

    # One figure per curve: train loss, test loss, test accuracy.
    curves = [
        (train_losses, "train_loss", "loss", "upper right"),
        (test_losses, "test_loss", "loss", "upper right"),
        (test_accuracies, "test_accuracy", "accuracy", "lower right"),
    ]
    for ydata, label, ylabel, loc in curves:
        plt.plot(epochs_seen, ydata, label=label)
        plt.xlabel("epoch")
        plt.ylabel(ylabel)
        plt.legend(loc=loc)
        plt.show()

    if (args.save_model):
        torch.save(model.state_dict(), "perfume_cnn.pt")
コード例 #19
0
def main():
    """Train the regression network, visualising evaluation loss if requested."""
    # Training Setting
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Live plot of the evaluation loss, when enabled.
    visual = Visualization() if args.visualization else None

    # Datasets and their loaders.
    train_dataset = CustomDataset('./train.txt')
    eval_dataset = CustomDataset('./eval.txt')
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset,
                                              batch_size=args.eval_batch_size,
                                              shuffle=False,
                                              **kwargs)

    # Model, optimizer, and per-epoch learning-rate decay.
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        eval_loss = evaluate(args, model, device, eval_loader)
        scheduler.step()

        print("R2: ", calculate_R(args, model, device, eval_loader))

        # Update the live plot with this epoch's evaluation loss.
        if args.visualization:
            visual.data_update(epoch, eval_loss)
            visual.render()

    if args.save_model:
        torch.save(model.state_dict(), "model_record.pt")

    # Keep the plot window open after training finishes.
    if args.visualization:
        visual.terminate()
コード例 #20
0
 def __init__(self, trained_weights: str, device: str):
     """Remember the checkpoint path, pick the torch device and initialise.

     @param trained_weights path to the trained weight file
     @param device 'cuda' to run on the first GPU, anything else for CPU
     """
     self.net = Net()
     self.weights = trained_weights
     self.device = torch.device('cuda:0' if device == 'cuda' else 'cpu')
     self._initialize()
コード例 #21
0
def main(args):
    """Grab camera frames from the robot, detect objects with YOLO and classify
    each detected box with the CNN, displaying the annotated images."""
    workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224],
                                   [-0.0001, 0.4]])
    mode = args.mode
    config_file = args.config_file
    cfg = args.cfg
    obj_data = args.obj_data
    weights = args.weights

    robot = Robot(workspace_limits, args.config_file, fixed_obj_coord=True)

    # Load model
    ## YOLO detector (C API expects bytes for the paths)
    yolo = YOLO()
    yolo_net = yolo.load_net(cfg.encode(), weights.encode(), 0)
    meta = yolo.load_meta(obj_data.encode())
    ## CNN classifier
    cnn_net = Net()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cnn_net.to(device)
    save_path = "vision/classification/model/model.pt"
    cnn_net.load_state_dict(torch.load(save_path))

    cnt = 0
    for frame_idx in range(12):
        rgb_img, depth_img = robot.get_camera_data()
        rgb_img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR)
        vis_img = rgb_img.copy()
        # Only process once 10 frames have gone by.
        if cnt >= 10:
            r = yolo.detect(yolo_net, meta, rgb_img)
            # Draw a labelled green box for every detection.
            for det in r:
                x, y, w, h = det[2][0], det[2][1], det[2][2], det[2][3]
                xmin, ymin, xmax, ymax = yolo.convertBack(
                    float(x), float(y), float(w), float(h))
                pt1 = (xmin, ymin)
                pt2 = (xmax, ymax)
                cv2.rectangle(vis_img, pt1, pt2, (0, 255, 0), 2)
                cv2.putText(
                    vis_img,
                    det[0].decode() + " [" + str(round(det[1] * 100, 2)) + "]",
                    (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    [0, 255, 0], 4)
            cv2.imshow("img", vis_img)

            # Classification of each detected box.
            for det_idx, r0 in enumerate(r):
                bbox_info = get_bbox(r0)
                bbox_img, label = get_bbox_img(rgb_img, bbox_info)
                # HWC image -> NCHW float tensor on the classifier's device.
                torch_bbox_img = torch.from_numpy(bbox_img).float().to(
                    device).permute(2, 0, 1).unsqueeze(0)
                output = cnn_net(torch_bbox_img)
                _, predicted = torch.max(output, 1)
                # show result
                print("category label = {}".format(label_to_text(predicted)))
                cv2.imshow("bbox img", bbox_img)
                cv2.waitKey(0)
        cnt += 1
コード例 #22
0
def main():
    """Train the (optionally adversarial) autoencoder, testing and
    checkpointing periodically."""
    # Hyper-parameters.
    train_adversarial = 1
    use_cuda = True
    epochs = 2000
    lr = 0.0005

    train_set, test_set = get_datasets(balance_train=True)

    num_features = train_set[0][0].shape[0]
    batch_size = 400
    test_batch_size = 50
    torch.manual_seed(7347)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('using device {0}'.format(device))

    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=test_batch_size, shuffle=True)

    model = Net(activation=nn.LeakyReLU(),
                num_features=num_features,
                embed_size=80).to(device)
    PATH = None
    # model.load_state_dict(torch.load(PATH, map_location=device), strict=False)

    # Separate optimizers for the autoencoder, discriminator and encoder parts.
    reconstruction_optimizer = optim.AdamW(model.autoenc_params(), lr=lr)
    discriminative_optimizer = optim.AdamW(model.disc_params(), lr=lr * 0.1)
    encoder_optimizer = optim.AdamW(model.enc_params(), lr=lr * 0.1)

    if train_adversarial:
        compute_loss = compute_loss_adversarial_enc
        optimizer = {'rec': reconstruction_optimizer,
                     'dis': discriminative_optimizer,
                     'enc': encoder_optimizer}
        schedulers = [StepLR(opt, step_size=70, gamma=0.9)
                      for opt in (reconstruction_optimizer,
                                  discriminative_optimizer,
                                  encoder_optimizer)]
    else:
        compute_loss = compute_loss_autoenc
        optimizer = {'rec': reconstruction_optimizer}
        schedulers = [StepLR(reconstruction_optimizer, step_size=50, gamma=0.9)]

    for epoch in range(1, epochs + 1):
        # Evaluate every 50 epochs, before that epoch's training step.
        if epoch % 50 == 0:
            test(model, compute_loss, device, test_loader)
        train(model, compute_loss, device, train_loader, optimizer, epoch)
        for scheduler in schedulers:
            scheduler.step()
        # Checkpoint every 100 epochs.
        if epoch % 100 == 0 and epoch:
            torch.save(model.state_dict(), "mnist_cnn{0}.pt".format(epoch))
        # NOTE: this prints the rate of the last scheduler in the list only.
        print('learning rate: {0}'.format(scheduler.get_lr()))
コード例 #23
0
    def __init__(self, args, train_loader, test_loader, writer=None):
        """Store the training configuration and build the two denoising networks.

        Args:
            args: configuration namespace (model, device, recon_kernel_size, ...).
            train_loader / test_loader: data loaders for training/evaluation.
            writer: optional summary writer for logging.
        """
        self.args = args
        self.model = args.model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.device = args.device
        self.recon_kernel_size = self.args.recon_kernel_size
        self.eps = 0.00316  # small epsilon constant
        self.global_step = 0
        self.model_dir = os.path.join('model', self.args.model)
        self.print_freq = self.args.print_freq
        self.writer = writer

        # Make sure the checkpoint directory exists.
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)

        print('Making the Network')
        # Separate networks for the diffuse and specular components.
        self.diffuseNet = Net(self.args).to(self.device)
        self.specularNet = Net(self.args).to(self.device)
コード例 #24
0
def define_model():
    """Instantiate the CNN, moving it to the GPU when one is available.

    Returns:
        The freshly constructed model.
    """
    print("Defining the Network Architecture...\n")
    model = Net()
    if train_on_gpu:  # global CUDA-availability flag set elsewhere
        model.cuda()
    return model
コード例 #25
0
ファイル: agent.py プロジェクト: DanielAnthes/QLearning
 def __init__(self):
     """Set up the Q-learning agent: network, optimizer, replay buffer, env."""
     self.lr = 0.001      # learning rate
     # Probability of a random action instead of the policy; encourages exploration.
     self.epsilon = 1
     self.gamma = 0.95    # reward discount factor
     self.net = Net().to(DEVICE)
     self.optim = optim.RMSprop(self.net.parameters(), lr=self.lr)
     self.replaybuffer = list()
     self.maxbuf = 10000  # replay buffer capacity
     self.env = gym.make('LunarLander-v2')
     self.actions = [0, 1, 2, 3]
コード例 #26
0
ファイル: API.py プロジェクト: taco1998/RESNET-tensorflow
    def __init__(self):
        """Build the network in training mode and copy configuration constants."""
        self.net = Net(is_training=True)
        self.model_path = cfg.MODEL_PATH  # checkpoint location
        self.size = cfg.TARGET_SIZE       # target input size
        self.bias = cfg.BIAS
        self.classes = cfg.CLASSES
コード例 #27
0
def main():
    """Train a classifier head, optionally on top of a frozen feature extractor."""
    # Hyper-parameters.
    embed_size = 80
    use_embed = True
    use_cuda = True
    epochs = 2000
    lr = 0.0001

    train_set, test_set = get_datasets(label_columns=['posOutcome'])
    # process with feature extractor
    # create new test and train sets

    num_features = train_set[0][0].shape[0]
    batch_size = 100
    test_batch_size = 50
    torch.manual_seed(7347)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('using device {0}'.format(device))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=test_batch_size,
                                              shuffle=True)

    # Pre-trained feature extractor producing `embed_size`-dim embeddings.
    extractor = Net(activation=nn.LeakyReLU(),
                    num_features=num_features,
                    embed_size=embed_size).to(device)
    extractor.load_state_dict(torch.load('extractor.pt', map_location=device),
                              strict=False)

    # The classifier sees embeddings when use_embed, raw features otherwise.
    model = ClassifierNet(
        num_features=embed_size if use_embed else num_features,
        activation=nn.LeakyReLU()).to(device)
    classifier_optimizer = optim.AdamW(model.parameters(), lr=lr)
    compute_loss = compute_classifier_loss
    optimizer = {'opt': classifier_optimizer}
    schedulers = [StepLR(classifier_optimizer, step_size=50, gamma=0.9)]

    callback = compute_classifier_loss
    if use_embed:
        # Route batches through the extractor before the classifier loss.
        callback = functools.partial(loss_callback, extractor=extractor)

    for epoch in range(1, epochs + 1):
        # Evaluate at the first epoch and then every 50 epochs.
        if epoch % 50 == 0 or epoch == 1:
            test(model, callback, device, test_loader)
        train(model, callback, device, train_loader, optimizer, epoch)
        for scheduler in schedulers:
            scheduler.step()
        # Checkpoint every 100 epochs.
        if epoch % 100 == 0 and epoch:
            torch.save(model.state_dict(), "classifier{0}.pt".format(epoch))
        print('learning rate: {0}'.format(scheduler.get_lr()))
コード例 #28
0
 def __init__(self, num_states, num_actions, Double, Dueling, PER):
     """DQN brain: replay memory, main/target networks and optimizer.

     Double/Dueling/PER toggle the corresponding DQN extensions.
     """
     self.num_actions = num_actions  # number of possible actions (2)
     self.Double = Double
     self.Dueling = Dueling
     self.PER = PER

     # Memory object that stores transitions for experience replay.
     self.memory = ReplayMemory(CAPACITY)

     # Build the main Q-network and its target copy with the Net class.
     n_in, n_mid, n_out = num_states, 32, num_actions
     self.main_q_network = Net(n_in, n_mid, n_out, Dueling)
     self.target_q_network = Net(n_in, n_mid, n_out, Dueling)
     print(self.main_q_network)  # show the network architecture

     # Choose the optimisation method.
     self.optimizer = optim.Adam(self.main_q_network.parameters(), lr=0.0001)

     # PER - prioritised replay keeps a separate memory of TD errors.
     if self.PER == True:
         self.td_error_memory = TDerrorMemory(CAPACITY)
コード例 #29
0
ファイル: train.py プロジェクト: kevinghst/spaceship_submit
def main():
    """Two-stage training: localization first, then a classification head via
    transfer learning with the earlier layers frozen. Saves model.pth.tar."""
    model = Net()

    # Part I - Train model to localize spaceship on images containing spaceship
    print("Start localization training")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    optimizer = optim.Adam(model.parameters(), eps=1e-07)

    cudnn.benchmark = True
    criterion = cal_diou

    epochs = 40
    steps_per_epoch = 3125
    batch_size = 64

    for epoch in range(0, epochs):
        adjust_learning_rate(optimizer, epoch)
        train(model, optimizer, epoch, device, steps_per_epoch, batch_size,
              criterion)

    # Part II - Apply transfer learning to train pre-trained model to detect
    # whether spaceship exists
    print("Start classification training")

    model.mode = 'classification'
    criterion = nn.BCELoss()

    # Freeze the backbone and the localizer; only the new head trains.
    for frozen_part in (model.convnet, model.localizer):
        for param in frozen_part.parameters():
            param.requires_grad = False

    batch_size = 64
    steps_per_epoch = 500
    epochs = 2
    optimizer = optim.Adam(model.parameters(), eps=1e-07)

    for epoch in range(epochs):
        train(model,
              optimizer,
              epoch,
              device,
              steps_per_epoch,
              batch_size,
              criterion,
              classification=True)

    # Save model
    torch.save(model.state_dict(), 'model.pth.tar')
コード例 #30
0
def main():
    """Train a 784-100-50-10 network and plot its accuracy per epoch."""
    net = Net([784, 100, 50, 10])
    print("neurons: {}".format(net.neurons))

    # update() runs training and returns weights plus an accuracy history
    # indexed by epoch, together with the number of epochs run.
    B, W, acc, count = net.update(noise=0)

    epochs = np.array([k for k in range(count)])
    accuracy = np.array([acc[k] for k in range(count)])
    plt.plot(epochs, accuracy)
    plt.xlabel("epoch")
    plt.ylim(50.0, 100.0)
    plt.ylabel("accuracy[%]")
    plt.title("neurons: {}".format(net.neurons))
    plt.show()