def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.model, checkpoint['epoch']))
        else:
            logger.info("=> no pretrained model found at '{}'".format(
                args.model))
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)

    logger.info("Data loading done.")

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)

    logger.info(
        "Test done! Total %d images in %.4f secs (excluding image I/O), fps: %.3f" %
        (len(test_loader), total_time, len(test_loader) / total_time))
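
This loader calls torch.load(args.model) with no map_location, so a checkpoint saved on a GPU can only be restored on a machine where that GPU is visible. A hedged variant of the loading step, a sketch assuming the same checkpoint layout:

checkpoint = torch.load(args.model, map_location="cpu")  # restore onto CPU first
model.load_state_dict(checkpoint['state_dict'])
model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])    # move back only when CUDA is available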
Example #2
def digit_prediction():
    if request.method == "POST":
        img = request.get_json()
        img = preprocess(img)
        net = Net()
        digit, probability = net.predict_with_pretrained_weights(img, 'pretrained_weights.pkl')
        # Truncate the probability to two decimal places for the response.
        data = {"digit": digit, "probability": int(probability * 100) / 100}
        return jsonify(data)
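
The handler above tests request.method, but the snippet omits the Flask app and the route registration it needs to run. A minimal wiring sketch; the app object and the /predict path are illustrative assumptions, not taken from the original project:

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/predict', methods=['POST'])  # hypothetical route path
def digit_prediction():
    ...  # body as in Example #2 above

if __name__ == '__main__':
    app.run()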
Example #3
def digit_prediction():
    if request.method == "POST":
        img = request.get_json()
        img = preprocess(img)
        net = Net()
        digit, probability = net.predict_with_pretrained_weights(img, 'pretrained_weights.pkl')
        # Serialize the per-class probabilities as a space-separated string.
        prob = ' '.join(str(p) for p in probability)
        data = {"digit": digit, "probability": prob}
        return jsonify(data)
Example #4
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])

    if args.model:
        if isfile(args.model):
            import torch
            logger.info("=> loading pretrained model '{}'".format(args.model))
            m = torch.load(args.model)
            if 'state_dict' in m:
                m = m['state_dict']
            # Strip the checkpoint wrapper and re-save the bare state dict.
            torch.save(m, '_temp_model.pth')
            del m
            model.load('_temp_model.pth')
            logger.info("=> loaded checkpoint '{}'".format(args.model))
        else:
            logger.info("=> no pretrained model found at '{}'".format(
                args.model))
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=int(os.environ.get("BS", "1")),
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)
    logger.info("Data loading done.")

    weights_nodes = {}
    data_nodes = {}

    def named_dump_func(name):
        def dump_func(self, inputs, outputs):
            input_name = name + '_input'
            output_name = name + '_output'
            if isinstance(self, nn.Conv2d):
                # detach() before numpy(): parameters carry grad and cannot
                # be converted to numpy directly.
                weights_nodes[name] = self.weight.detach().cpu().numpy()
            data_nodes[input_name] = inputs[0].detach().cpu().numpy()
            data_nodes[output_name] = outputs[0].detach().cpu().numpy()

        return dump_func

    if args.dump:
        logger.info('Add hooks to dump data.')
        for name, module in model.named_modules():
            print(name)
            module.register_forward_hook(named_dump_func(name))

    test(test_loader, model, args)
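
Unlike Example #5 below, this variant registers the dump hooks but never writes weights_nodes and data_nodes to disk. Mirroring the np.save calls from Example #5 (assuming numpy is imported as np; the .npy filenames are illustrative), the dump could be persisted right after test(...):

if args.dump:
    np.save('data_nodes.npy', data_nodes)        # per-layer inputs/outputs
    np.save('weights_nodes.npy', weights_nodes)  # conv weights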
Example #5
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"], numRho=CONFIGS["MODEL"]["NUMRHO"], backbone=CONFIGS["MODEL"]["BACKBONE"])

    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            import torch
            m = torch.load(args.model)
            if 'state_dict' in m:
                m = m['state_dict']
            torch.save(m, '_temp_model.pth')
            del m
            model.load('_temp_model.pth')
            logger.info("=> loaded checkpoint '{}'".format(args.model))
        else:
            logger.info("=> no pretrained model found at '{}'".format(args.model))
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)
    logger.info("Data loading done.")
    
    weights_nodes = {}
    data_nodes = {}

    def named_dump_func(name):
        def dump_func(self, inputs, outputs):
            input_name = name + '_input'
            output_name = name + '_output'
            if isinstance(self, nn.Conv2d):
                # detach() before numpy(): parameters carry grad and cannot
                # be converted to numpy directly.
                weights_nodes[name] = self.weight.detach().cpu().numpy()
            data_nodes[input_name] = inputs[0].detach().cpu().numpy()
            data_nodes[output_name] = outputs[0].detach().cpu().numpy()
        return dump_func

    if args.dump:
        logger.info('Add hooks to dump data.')
        for name, module in model.named_modules():
            module.register_forward_hook(named_dump_func(name))

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)

    if args.dump:
        np.save('data_nodes.npy', data_nodes)
        np.save('weights_nodes.npy', weights_nodes)
        exit()

    logger.info("Test done! Total %d imgs at %.4f secs without image io, fps: %.3f" % (len(test_loader), total_time, len(test_loader) / total_time))
Example #6
File: run.py Project: rvk007/EVA4
def run_model(device,
              train_loader,
              val_loader,
              epochs,
              learning_rate,
              momentum,
              dropout,
              step_size,
              gamma,
              l1=0.0,
              l2=0.0):
    losses = []
    accuracies = []
    incorrect_samples = []

    print('\nCreating model')
    model = Net(dropout).to(device)  # Create model
    model_summary(model)  # Display model summary

    optimizer = sgd_optimizer(model, learning_rate, l2,
                              momentum)  # Create optimizer
    scheduler = lr_scheduler(optimizer, step_size, gamma)  # Set LR scheduler

    for epoch in range(1, epochs + 1):
        print(f'Epoch {epoch}:')
        train(model, device, train_loader, optimizer, epoch, l1)
        scheduler.step()
        val(model, device, val_loader, losses, accuracies, incorrect_samples)

    return losses, accuracies, incorrect_samples
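
A hedged usage sketch of run_model; the device selection is standard PyTorch, while the loader objects and every hyperparameter value below are illustrative assumptions, not values from the project:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# train_loader and val_loader are assumed to be pre-built DataLoader objects.
losses, accuracies, incorrect_samples = run_model(
    device, train_loader, val_loader,
    epochs=20, learning_rate=0.01, momentum=0.9,
    dropout=0.1, step_size=10, gamma=0.1, l2=1e-4)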
Example #7
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"], numRho=CONFIGS["MODEL"]["NUMRHO"], backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # load the pretrained model (you are free to load your own models)
    state_dict = torch.hub.load_state_dict_from_url("http://data.kaizhao.net/projects/deep-hough-transform/dht_r50_fpn_sel-c9a29d40.pth", check_hash=True)
    model.load_state_dict(state_dict)


    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint)
            logger.info("=> loaded checkpoint '{}'"
                  .format(args.model))
        else:
            logger.info("=> no pretrained model found at '{}'".format(args.model))
            exit()
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)

    logger.info("Data loading done.")

    
    logger.info("Start testing.")
    total_time = test(test_loader, model, args)
    
    logger.info("Test done! Total %d imgs at %.4f secs without image io, fps: %.3f" % (len(test_loader), total_time, len(test_loader) / total_time))
Example #8
    root_path=config["train_reader"]["input_path"],
    nclasses=nclasses
)

train_loader = torch.utils.data.DataLoader(
    train_dataset,
    sampler=BalanceClassSampler(labels=train_dataset.get_labels(), mode="upsampling"),
    batch_size=config["train_config"]["batch_size"],
    pin_memory=False,
    drop_last=True,
    num_workers=config["train_config"]["num_workers"],
)
val_loader = torch.utils.data.DataLoader(
    validation_dataset,
    batch_size=config["train_config"]["batch_size"],
    num_workers=config["train_config"]["num_workers"],
    shuffle=False,
    sampler=SequentialSampler(validation_dataset),
    pin_memory=False,
)

if nfolds == 0:
    val_loader = None
#from torchsummary import summary
model = Net(num_classes=nclasses, config=config)

fitter = Fitter(model=model, device=device, config=config)
fitter.fit(train_loader, val_loader)


Example #9
def train(args):
    # CONFIGS = yaml.load(open(args.config)) # deprecated, please set the configs in parse_args()

    # Set device. Note: CUDA_VISIBLE_DEVICES must be set before the first
    # CUDA call to take effect.
    if torch.cuda.is_available():
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device.strip()
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")  # not recommended

    # Set save folder & logging config
    subfolder = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
    if not args.save_folder or not os.path.isdir(args.save_folder):
        print(
            "Warning: invalid value for 'save_folder'; falling back to the default './save_folder'."
        )
        save_folder = "./save_folder"
    else:
        save_folder = args.save_folder
    save_folder = os.path.join(save_folder, subfolder)
    os.makedirs(save_folder, exist_ok=True)
    # TODO: logging

    # Load Dataset
    trainloader = get_loader(args.train_gtfile,
                             batch_size=args.batch_size,
                             num_thread=args.num_workers)
    valloader = get_loader(args.val_gtfile,
                           batch_size=args.batch_size,
                           num_thread=args.num_workers)

    # Init Net
    model = Net(numAngle=args.num_angle,
                numRho=args.num_rho,
                backbone=args.backbone)
    if args.resume:
        model.load_state_dict(torch.load(args.resume))
    model = torch.nn.DataParallel(model).to(device)

    # Optimizer
    optimizer = optim.Adam(model.parameters())

    # Loss
    criterion = torch.nn.CrossEntropyLoss()
    losses = AverageMeter()

    # Start Training
    model.train()
    iteration = 0  # counts processed batches; the first batch is iteration 1
    for epoch in range(args.max_epoch):

        for batch in trainloader:
            start = time.time()
            iteration += 1
            img_tensor, gt_tensor = batch
            # Move the batch to the same device as the model.
            img_tensor = img_tensor.to(device)
            gt_tensor = gt_tensor.to(device)
            optimizer.zero_grad()

            # Forwarding
            preds = model(img_tensor)

            # Calculate Loss
            loss = criterion(preds, gt_tensor)
            loss.backward()
            optimizer.step()
            losses.update(loss.item(), args.batch_size)

            if iteration % args.show_interval == 0:
                logging.info(
                    f"Training [{epoch}/{args.max_epoch}][{iteration}] Loss:{losses.avg} Time:{time.time()-start:.1f}s"
                )

            if iteration % args.val_interval == 0:
                pass  # see the validation sketch below
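
The val_interval branch above is left as a stub. A minimal sketch of what it might contain, assuming valloader yields (img, gt) batches shaped like the training batches and that the same criterion applies:

            if iteration % args.val_interval == 0:
                model.eval()
                val_losses = AverageMeter()
                with torch.no_grad():
                    for val_img, val_gt in valloader:
                        val_preds = model(val_img.to(device))
                        val_losses.update(
                            criterion(val_preds, val_gt.to(device)).item(),
                            val_img.size(0))
                logging.info(f"Validation [{epoch}/{args.max_epoch}] Loss:{val_losses.avg}")
                model.train()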
Example #10
if __name__ == '__main__':
    SAVE_PATH = './checkpoint/cp100000.bin'
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.backends.cudnn.benchmark = True
    logger = Logger('./logs')
    batch_size = 1
    load_checkpoint = True

    print('%s: calling main function ... ' % os.path.basename(__file__))
    csv_path = '../data/detection_test.csv'
    img_path = '../data/detection_test_t'
    dataset = Rand_num(csv_path, img_path, 112 * 4, None)
    sampler = SequentialSampler(dataset)
    loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler,
                        shuffle=False, num_workers=2)

    net = Net(14)
    if load_checkpoint:
        net.load_state_dict(torch.load(SAVE_PATH))
        
    net.cuda()
        
    accu_tp = []
    accu_fp = []
    accu_iou = []
    for epoch in range(1): 
        for num, data in enumerate(loader, 0):
                # get the inputs
            images, inputs, labels = data
            inputs, labels = inputs.float()[0]/256, labels.float()
#        
#                # wrap them in Variable
Example #11
def main():

    logger.info(args)
    assert os.path.isdir(CONFIGS["DATA"]["DIR"])

    if CONFIGS['TRAIN']['SEED'] is not None:
        random.seed(CONFIGS['TRAIN']['SEED'])
        torch.manual_seed(CONFIGS['TRAIN']['SEED'])
        cudnn.deterministic = True

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])

    if CONFIGS["TRAIN"]["DATA_PARALLEL"]:
        logger.info("Model Data Parallel")
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # optimizer
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=CONFIGS["OPTIMIZER"]["LR"],
        weight_decay=CONFIGS["OPTIMIZER"]["WEIGHT_DECAY"])

    # learning rate scheduler
    scheduler = lr_scheduler.MultiStepLR(
        optimizer,
        milestones=CONFIGS["OPTIMIZER"]["STEPS"],
        gamma=CONFIGS["OPTIMIZER"]["GAMMA"])
    best_acc1 = 0
    if args.resume:
        if isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    # dataloader
    train_loader = get_loader(CONFIGS["DATA"]["DIR"],
                              CONFIGS["DATA"]["LABEL_FILE"],
                              batch_size=CONFIGS["DATA"]["BATCH_SIZE"],
                              num_thread=CONFIGS["DATA"]["WORKERS"],
                              split='train')
    val_loader = get_loader(CONFIGS["DATA"]["VAL_DIR"],
                            CONFIGS["DATA"]["VAL_LABEL_FILE"],
                            batch_size=1,
                            num_thread=CONFIGS["DATA"]["WORKERS"],
                            split='val')

    logger.info("Data loading done.")

    # Tensorboard summary

    writer = SummaryWriter(log_dir=os.path.join(CONFIGS["MISC"]["TMP"]))

    start_epoch = 0
    best_acc = best_acc1
    is_best = False
    start_time = time.time()

    if CONFIGS["TRAIN"]["RESUME"] is not None:
        raise NotImplementedError

    if CONFIGS["TRAIN"]["TEST"]:
        validate(val_loader, model, 0, writer, args)
        return

    logger.info("Start training.")

    for epoch in range(start_epoch, CONFIGS["TRAIN"]["EPOCHS"]):

        train(train_loader, model, optimizer, epoch, writer, args)
        acc = validate(val_loader, model, epoch, writer, args)
        #return
        scheduler.step()

        if best_acc < acc:
            is_best = True
            best_acc = acc
        else:
            is_best = False

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc,
                'optimizer': optimizer.state_dict()
            },
            is_best,
            path=CONFIGS["MISC"]["TMP"])

        t = time.time() - start_time
        elapsed = DayHourMinute(t)
        t /= (epoch + 1) - start_epoch  # seconds per epoch
        t = (CONFIGS["TRAIN"]["EPOCHS"] - epoch - 1) * t
        remaining = DayHourMinute(t)

        logger.info(
            "Epoch {0}/{1} finishied, auxiliaries saved to {2} .\t"
            "Elapsed {elapsed.days:d} days {elapsed.hours:d} hours {elapsed.minutes:d} minutes.\t"
            "Remaining {remaining.days:d} days {remaining.hours:d} hours {remaining.minutes:d} minutes."
            .format(epoch,
                    CONFIGS["TRAIN"]["EPOCHS"],
                    CONFIGS["MISC"]["TMP"],
                    elapsed=elapsed,
                    remaining=remaining))

    logger.info("Optimization done, ALL results saved to %s." %
                CONFIGS["MISC"]["TMP"])
Example #12
    dataset = Rand_num(csv_path, img_path, 112 * 4, None)
    sampler = RandomSampler(dataset)
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        sampler=sampler,
                        shuffle=False,
                        num_workers=2)

    #    dataiter = iter(loader)
    #    images, labels = dataiter.next()
    #    print (images)
    #    images=tensor_to_img(images)
    #    print (labels)
    #    print (images)

    net = Net(14 * batch_size)
    lstm = LSTMLayer(7 * 7 * (16 + 5 * 2), 64, 14 * 14 * (num_class + 5 * 2),
                     2, batch_size)
    lossfunction = Loss(batch_size)
    # Two parameter groups: net uses the base lr (0 here), the LSTM
    # overrides it with 1e-4.
    optimizer = optim.Adam(
        [{'params': net.parameters()},
         {'params': lstm.parameters(), 'lr': 0.0001}],
        lr=0, weight_decay=0)
    if load_checkpoint:
        net.load_state_dict(torch.load(SAVE_PATH))

    net.cuda()
Example #13
    train_val_ratio = config["train_config"]["ratio"]
else:
    train_val_ratio = 0.9
color_mode = "RGB"
mode = "jpegfactor"
fold_number = config["train_config"]["fold"]
nclasses = config["train_config"]["nclasses"]
mode = config["train_config"]["mode"] if "mode" in config[
    "train_config"] else None
multiclass_df = config["train_config"][
    "multiclass_df"] if "multiclass_df" in config["train_config"] else None
device = 'cuda'

AUGMENTATIONS_TRAIN, AUGMENTATIONS_TEST = get_transforms()

model = Net(num_classes=nclasses, config=config)
if torch.cuda.device_count() > 1 and device == 'cuda':
    print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model).to(device)
checkpoint = torch.load(config["test_config"]["checkpoint"])
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

test_ids = os.listdir(config['test_config']['input_path'])
for i in range(len(test_ids)):
    test_ids[i] = os.path.join(config['test_config']['input_path'],
                               test_ids[i])

dataset = DatasetSubmissionRetriever(
    image_names=np.array(test_ids),
    transforms=AUGMENTATIONS_TEST,
Example #14
import numpy as np
import mnist
from model.network import Net

print('Loading data......')
num_classes = 10
train_images = mnist.train_images() #[60000, 28, 28]
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()

print('Preparing data......')
train_images = (train_images - np.mean(train_images))/np.std(train_images)
test_images = (test_images - np.mean(test_images))/np.std(test_images)
#train_images = train_images/255
#test_images = test_images/255
training_data = train_images.reshape(60000, 1, 28, 28)
training_labels = np.eye(num_classes)[train_labels]
testing_data = test_images.reshape(10000, 1, 28, 28)
testing_labels = np.eye(num_classes)[test_labels]

net = Net()
#print('Training Lenet......')
#net.train(training_data, training_labels, 100, 1, 'weights_fp.pkl')
#print('Testing Lenet......')
#net.test(testing_data, testing_labels, 100)
print('Testing with pretrained weights......')
net.test_with_pretrained_weights(testing_data, testing_labels, 1, 'pretrained_weights.pkl')
print('Predicting with pretrained weights......')
print(net.predict_with_pretrained_weights(testing_data[0], 'pretrained_weights.pkl'))
Example #15
import numpy as np
import mnist
from model.network import Net

print('Loading data......')
num_classes = 10
train_images = mnist.train_images()  #[60000, 28, 28]
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()

print('Preparing data......')
# Normalize as floats; integer arrays cannot be divided in place.
train_images = (train_images - np.mean(train_images)) / np.std(train_images)
test_images = (test_images - np.mean(test_images)) / np.std(test_images)
training_data = train_images.reshape(60000, 1, 28, 28)
training_labels = np.eye(num_classes)[train_labels]
testing_data = test_images.reshape(10000, 1, 28, 28)
testing_labels = np.eye(num_classes)[test_labels]

net = Net()
#print('Training Lenet......')
#net.train(training_data, training_labels, 32, 1, 'weights.pkl')
#print('Testing Lenet......')
#net.test(testing_data, testing_labels, 100)
print('Testing with pretrained weights......')
net.test_with_pretrained_weights(testing_data, testing_labels, 100,
                                 'pretrained_weights.pkl')
Example #16
import numpy as np
import mnist
from model.network import Net

print('Loading data......')
num_classes = 10
train_images = mnist.train_images()  #[60000, 28, 28]
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()

print('Preparing data......')
# Normalize as floats; integer arrays cannot be divided in place.
train_images = (train_images - np.mean(train_images)) / np.std(train_images)
test_images = (test_images - np.mean(test_images)) / np.std(test_images)
training_data = train_images.reshape(60000, 1, 28, 28)
training_labels = np.eye(num_classes)[train_labels]
testing_data = test_images.reshape(10000, 1, 28, 28)
testing_labels = np.eye(num_classes)[test_labels]

net = Net()
print('Training Lenet......')
net.train(training_data, training_labels, 32, 1, 'weights.pkl')
print('Testing Lenet......')
net.test(testing_data, testing_labels, 100)
print('Testing with pretrained weights......')
net.test_with_pretrained_weights(testing_data, testing_labels, 100,
                                 'pretrained_weights.pkl')
Example #17
valid_dataset = Alaska2Dataset(IMAGE_IDS_val, IMAGE_LABELS_val, augmentations=AUGMENTATIONS_TEST, color_mode=color_mode)

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           num_workers=num_workers,
                                           shuffle=True)

valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                           batch_size=batch_size,
                                           num_workers=num_workers,
                                           shuffle=False)
device = 'cuda'

if torch.cuda.device_count() > 1 and device == 'cuda':
    print("Let's use", torch.cuda.device_count(), "GPUs!")
model = Net(num_classes=n_classes)
# pretrained model in my pc. now i will train on all images for 2 epochs
# model.load_state_dict(torch.load('./epoch_5_val_loss_7.03_auc_0.844.pth'))
model = nn.DataParallel(model).to(device)


optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
criterion = torch.nn.CrossEntropyLoss()

train_loss, val_loss = [], []

for epoch in range(epochs):
    print('Epoch {}/{}'.format(epoch, epochs - 1))
    print('-' * 10)
    model.train()
    running_loss = 0