Code Example #1
import time

import tensorflow as tf

# CTPN, VGG, DenseNet, DataLoader, TextGenerator, rotate, detect, recognize and
# cfg come from the surrounding repository and are assumed to be importable here.


def main():
    ctpn = CTPN(cfg)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    ctpn.load_ckpt(sess) # ctpn load
    
    
    if cfg.ADJUST_ANGLE:
        angle_detector = VGG(cfg) # vgg load
        angle_detector.load_weights()
        
    data = DataLoader(cfg)
    text = TextGenerator(cfg)
    
    densenet = DenseNet(cfg,text.nrof_classes)
    densenet.load()
    
#    image_path = raw_input("please input your image path and name:") # get image path
    image_path = '/home/jwm/Desktop/OCR-standard/images/xuanye.jpg'
    img = data.load_data(image_path)
    t = time.time()    
    if cfg.ADJUST_ANGLE:
        img = rotate(img, angle_detector) # rotate image if necessary
        
#    img = cv2.resize(img, (2000,3000),interpolation=cv2.INTER_CUBIC)
    
    text_recs, detected_img, img = detect(img, data, ctpn, sess) # detect text
    results = recognize(img, text_recs, densenet, text, adjust=False) # recognize text
    print("It takes time:{}s".format(time.time() - t))
    for key in results:
        print(results[key][1])
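The image path in this example is hard-coded; the commented-out raw_input call above is Python 2. If interactive input is wanted instead, a Python 3 equivalent (a minimal sketch, not part of the original script) is:

image_path = input("please input your image path and name: ").strip()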
Code Example #2
def train():
    '''
       --DATA_DIR /media/jwm/DATA/work/data/CRNN
       --PRETRAINED_MODEL /home/jwm/Desktop/OCR-standard/experiments/densenet/weights_densenet.h5
       --SAVED_PATH experiments/densenet_ckpt
       --DENSENET_LOGGER experiments/densenet_logger
    '''
    data = TextGenerator(cfg)
    model = DenseNet(cfg, data.nrof_classes)
    trainer = DenseNetTrainer(cfg, data, model)
    trainer.train()
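The docstring above lists the command-line flags this training entry point expects. Below is a minimal argparse sketch of how such flags could be collected; the parser and its defaults simply mirror the flag names in the docstring and are assumptions for illustration, not the repository's actual cfg loader.

import argparse

def parse_train_flags():
    # Flag set taken from the docstring of train() above; defaults are illustrative.
    parser = argparse.ArgumentParser()
    parser.add_argument('--DATA_DIR', required=True, help='root directory of the training data')
    parser.add_argument('--PRETRAINED_MODEL', default=None, help='optional .h5 weights to warm-start from')
    parser.add_argument('--SAVED_PATH', default='experiments/densenet_ckpt', help='checkpoint directory')
    parser.add_argument('--DENSENET_LOGGER', default='experiments/densenet_logger', help='log directory')
    return parser.parse_args()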
Code Example #3
    train_idx, valid_idx = train_valid_split(train_data, 0.1, shuffle=True)
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    train_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=train_sampler,
                              num_workers=8, pin_memory=True)
    valid_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=valid_sampler,
                              num_workers=2, pin_memory=True)

    test_loader = DataLoader(test_data, batch_size=opt.batch_size, shuffle=False)

    # get data channels
    channels = [opt.close_size*opt.nb_flow,
                opt.period_size*opt.nb_flow,
                opt.trend_size*opt.nb_flow]
    model = DenseNet(nb_flows=opt.nb_flow, drop_rate=opt.drop_rate, channels=channels).cuda()
    optimizer = optim.Adam(model.parameters(), opt.lr)
    # optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9)
    # print(model)

    if not os.path.exists(opt.save_dir):
        os.makedirs(opt.save_dir)
    if not os.path.isdir(opt.save_dir):
        raise Exception('%s is not a dir' % opt.save_dir)

    if opt.loss == 'l1':
        criterion = nn.L1Loss().cuda()
    elif opt.loss == 'l2':
        criterion = nn.MSELoss().cuda()
    else:
        raise ValueError('unsupported loss: {}'.format(opt.loss))

    print('Training...')
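train_valid_split is not shown in this snippet. A minimal sketch of what such a helper typically does, assuming it returns two index sequences that the SubsetRandomSampler instances above can consume (an illustration, not the project's implementation):

import numpy as np

def train_valid_split(dataset, valid_fraction, shuffle=True):
    # Split the dataset indices into a training part and a validation part.
    indices = np.arange(len(dataset))
    if shuffle:
        np.random.shuffle(indices)
    n_valid = int(len(dataset) * valid_fraction)
    return indices[n_valid:], indices[:n_valid]  # train_idx, valid_idx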
Code Example #4
import torch
import torch.nn as nn
import wandb
from torch.optim.lr_scheduler import MultiStepLR
from tqdm import tqdm

# DenseNet, get_optimizer and cifar10loaders come from the surrounding
# repository and are assumed to be importable here.


def train(mode, dv):

    wandb.init(name=f'DenseNetGenOdinMode={mode}')
    device = torch.device(f'cuda:{dv}')

    model = DenseNet(mode=mode)
    model = model.to(device)
    if mode != -1:
        optimizer = get_optimizer(model)
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=0.1,
                                    momentum=0.9,
                                    weight_decay=0.0001)
    criterion = nn.CrossEntropyLoss()

    pickle_files = [
        'train_indices_cifar10.pickle', 'val_indices_cifar10.pickle'
    ]
    trainloader, _, testloader = cifar10loaders(train_batch_size=64,
                                                test_batch_size=64,
                                                pickle_files=pickle_files,
                                                resize=False)

    epochs = 300
    scheduler = MultiStepLR(optimizer,
                            milestones=[int(0.5 * epochs),
                                        int(0.75 * epochs)],
                            gamma=0.1)

    best_test_acc, best_test_loss = 0, 100

    for epoch in tqdm(range(epochs)):

        model.train()
        correct, total = 0, 0
        train_loss = 0
        for data in tqdm(trainloader):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            if mode == -1:
                outputs = model(inputs)
            else:
                outputs, h, g = model(inputs)
            loss = criterion(outputs, labels)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        train_accuracy = correct / total
        wandb.log({'epoch': epoch}, commit=False)
        wandb.log({
            'Train Set Loss': train_loss / len(trainloader),
            'epoch': epoch
        })
        wandb.log({'Train Set Accuracy': train_accuracy, 'epoch': epoch})

        model.eval()
        correct, total = 0, 0
        test_loss = 0

        with torch.no_grad():

            for data in testloader:
                images, labels = data
                images = images.to(device)
                labels = labels.to(device)

                if mode == -1:
                    outputs = model(images)
                else:
                    outputs, h, g = model(images)
                t_loss = criterion(outputs, labels)
                test_loss += t_loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

            epoch_test_loss = test_loss / len(testloader)
            wandb.log({'Test Set Loss': epoch_test_loss, 'epoch': epoch})
            epoch_accuracy = correct / total
            wandb.log({'Test Set Accuracy': epoch_accuracy, 'epoch': epoch})

        if epoch_accuracy > best_test_acc:
            best_test_acc = epoch_accuracy
            torch.save(
                model.state_dict(),
                f'/raid/ferles/checkpoints/dense/dense_net_godin_{mode}_epoch_{epoch}_acc_{best_test_acc}.pth'
            )

        scheduler.step(epoch=epoch)
Code Example #5
import cv2
import numpy as np
import torch

# integrated_gradients, calculate_outputs_and_gradients, visualize and DenseNet
# come from the surrounding repository and are assumed to be importable here.


def random_baseline_integrated_gradients(inputs, model, target_label_idx,
                                         predict_and_gradients, steps,
                                         num_random_trials, cuda):
    all_intgrads = []
    for i in range(num_random_trials):
        integrated_grad = integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, \
                                                baseline=255.0 *np.random.random(inputs.shape), steps=steps, cuda=cuda)
        all_intgrads.append(integrated_grad)
        print('the trial number is: {}'.format(i))
    avg_intgrads = np.average(np.array(all_intgrads), axis=0)
    return avg_intgrads


if __name__ == '__main__':
    # start to create models...
    num_classes = 5
    model, input_size = DenseNet(num_classes=num_classes)
    model.load_state_dict(
        torch.load('weights/model3', map_location=torch.device('cpu')))
    model.eval()

    # read the image
    img = cv2.imread('separated-data/test/im_Dyskeratotic/001.bmp')
    img = cv2.resize(img, (224, 224))
    img = img.astype(np.float32)
    img = img[:, :, (2, 1, 0)]
    # calculate the gradient and the label index
    gradients, label_index = calculate_outputs_and_gradients([img], model,
                                                             None)
    gradients = np.transpose(gradients[0], (1, 2, 0))
    img_gradient_overlay = visualize(gradients,
                                     img,
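The integrated_gradients function called by this example is not shown here. Below is a self-contained sketch of the standard approximation it implements: gradients of the target class score are averaged over points interpolated between a baseline and the input, then scaled by the input-baseline difference. The signature and shapes are assumptions for illustration, not the repository's code.

import numpy as np
import torch

def integrated_gradients_sketch(inputs, model, target_label_idx, steps=50, baseline=None, cuda=False):
    # `inputs` is a NumPy array already shaped the way the model expects (no batch dimension).
    if baseline is None:
        baseline = np.zeros_like(inputs)
    device = 'cuda' if cuda else 'cpu'
    grads = []
    for i in range(1, steps + 1):
        # Point on the straight-line path from the baseline to the input.
        x = baseline + (float(i) / steps) * (inputs - baseline)
        t = torch.tensor(x[None], dtype=torch.float32, device=device, requires_grad=True)
        out = model(t)
        out[0, target_label_idx].backward()
        grads.append(t.grad.detach().cpu().numpy()[0])
    avg_grads = np.mean(np.array(grads), axis=0)
    # Attribution: averaged gradients scaled by how far the input is from the baseline.
    return (inputs - baseline) * avg_grads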
Code Example #6
import torch
import torch.nn as nn
from copy import deepcopy

# json_file_to_pyobj comes from the surrounding repository and is assumed to be
# importable here.


def build_model(args, rot=False, dropout=None):

    json_options = json_file_to_pyobj(args.config)
    training_configurations = json_options.training

    modelName = training_configurations.model.lower() if not rot else 'rot' + training_configurations.model.lower()
    depth = int(training_configurations.depth)
    pretrained = training_configurations.pretrained == 'True'
    out_classes = training_configurations.out_classes
    print(out_classes)

    if modelName == 'wideresnet':
        from models.WideResNet import WideResNet
        if not pretrained:
            net = WideResNet(d=40, k=4, n_classes=out_classes, input_features=1, output_features=16, strides=[1, 1, 2, 2])
        else:
            net = WideResNet(d=40, k=4, n_classes=out_classes, input_features=3, output_features=16, strides=[1, 1, 2, 2])
        return net
    elif modelName == 'densenet':
        from models.DenseNet import DenseNet
        net = DenseNet(depth=121, growthRate=32, nClasses=out_classes)
        return net
    elif modelName == 'efficientnet':
        if depth in range(8):
            from efficientnet_pytorch import EfficientNet
            model = EfficientNet.from_pretrained('efficientnet-b{}'.format(depth))
            net = deepcopy(model)
            for param in net.parameters():
                param.requires_grad = True
            if not pretrained:
                net._conv_stem = nn.Conv2d(1, net._conv_stem.out_channels, kernel_size=3, stride=2, bias=False)
            net._fc = nn.Linear(model._fc.in_features, out_classes)
            if dropout is not None:
                net._dropout = torch.nn.Dropout(p=dropout)
            return net
        else:
            raise NotImplementedError('net not implemented')
    elif modelName == 'rotefficientnet':
        if depth in range(8):
            from efficientnet_pytorch.rot_model import RotEfficientNet
            model = RotEfficientNet.from_pretrained('efficientnet-b{}'.format(depth))
            net = deepcopy(model)
            for param in net.parameters():
                param.requires_grad = True
            net._fc = nn.Linear(model._fc.in_features, out_classes)
            return net
        else:
            raise NotImplementedError('net not implemented')
    elif modelName == 'genodinefficientnet':
        gen_odin_mode = training_configurations.gen_odin_mode
        if depth in range(8):
            from efficientnet_pytorch.gen_odin_model import GenOdinEfficientNet
            model = GenOdinEfficientNet.from_pretrained('efficientnet-b{}'.format(depth), mode=gen_odin_mode)
            from efficientnet_pytorch.gen_odin_model import CosineSimilarity
            model._fc_nominator = CosineSimilarity(feat_dim=1280, num_centers=out_classes)
            net = deepcopy(model)
            for param in net.parameters():
                param.requires_grad = True
            return net
        else:
            raise NotImplementedError('net not implemented')
    else:
        raise NotImplementedError('net not implemented')
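A hypothetical call site for build_model follows; the --config flag and the assumption that it points to a JSON file with a top-level "training" section (containing model, depth, pretrained, out_classes and, for Generalized-ODIN, gen_odin_mode) are inferred from the function body, not taken from the repository.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True, help='JSON file with a "training" section')
    args = parser.parse_args()
    net = build_model(args, rot=False, dropout=None)
    print(type(net).__name__)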