Example #1
import logging
import sys

import mxnet as mx

# get_resnet_model / get_iterator are project-local helpers; their imports
# are omitted in the original snippet.

# Send all log output to stdout at DEBUG level
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(stdout_handler)
root_logger.setLevel(logging.DEBUG)

if __name__ == "__main__":

    # Get the network symbol
    # Try different ResNet depths (18, 50, 101) to find the best one
    sym = get_resnet_model('pretrained_models/resnet-34', 0)
    _, args_params, aux_params = mx.model.load_checkpoint('detect_full_scale', 574)

    # Get the input iterators
    # Point these at the .rec files you created and adjust batch_size as needed
    train_data = get_iterator(path='train.rec',
                              data_shape=(3, 224, 224),
                              label_width=7 * 7 * 9,
                              batch_size=32,
                              shuffle=True)
    val_data = get_iterator(path='val.rec',
                            data_shape=(3, 224, 224),
                            label_width=7 * 7 * 9,
                            batch_size=32)
    
    # Assign the symbol to a device (GPU/CPU memory is allocated at bind time)
    mod = mx.mod.Module(symbol=sym, context=mx.gpu(0))
    print("LOOKS GOOD SO FAR")
    # Custom metric definition
    def loss_metric(label, pred):
        """
        label: np.array of shape (batch_size, 7, 7, 9)
        pred:  same shape as label
        """
        label = label.reshape((-1, 7, 7, 9))
        pred = pred.reshape((-1, 7, 7, 9))
        # Shift tanh-range predictions from [-1, 1] into [0, 1]
        pred_shift = (pred + 1) / 2
        # Channel 0 of the label is the object-confidence channel
        cl = label[:, :, :, 0]
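        # Hedged continuation sketch; the original snippet is cut off above.
        # Assuming channel 0 also holds the predicted confidence, one simple
        # readout is the mean squared confidence error over the batch:
        pred_cl = pred_shift[:, :, :, 0]
        return ((cl - pred_cl) ** 2).mean()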
Example #2
    # Select the device from the cpu flag
    if cpu == 1:
        context = mx.cpu(0)
    else:
        context = mx.gpu(0)

    # Get the network symbol
    # Try different ResNet depths (18, 50, 101) to find the best one
    sym = get_resnet_model('pretrained_models/resnet-34', 0, lambda_noobj)
    _, args_params, aux_params = mx.model.load_checkpoint(
        'pretrained_models/resnet-34', 0)

    # Get the input iterators
    # Point these at the .rec files you created and adjust batch_size as needed
    train_data = get_iterator(path=train_data_path,
                              data_shape=(3, 224, 224),
                              label_width=7 * 7 * 9,
                              batch_size=batch_size,
                              shuffle=True)
    val_data = get_iterator(path=val_data_path,
                            data_shape=(3, 224, 224),
                            label_width=7 * 7 * 9,
                            batch_size=batch_size)

    # Assign the symbol to a device (GPU/CPU memory is allocated at bind time)
    mod = mx.mod.Module(symbol=sym, context=context)

    # setup metric
    # metric = mx.metric.create(loss_metric, allow_extra_outputs=True)
    tme = time.time()

    # setup monitor for debugging
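    # Hedged sketch of the monitor this comment refers to; the interval,
    # statistic, and pattern below are illustrative assumptions, not from
    # the original code:
    mon = mx.mon.Monitor(
        interval=1,                                    # inspect every batch
        stat_func=lambda d: mx.nd.mean(mx.nd.abs(d)),  # mean |activation|
        pattern='.*output.*')                          # only layer outputs
    # Later, pass it to training, e.g. mod.fit(..., monitor=mon)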
Example #3
import logging
import sys

import mxnet as mx

# get_resnet_model / get_iterator are project-local helpers; their imports
# are omitted in the original snippet.

# Send all log output to stdout at DEBUG level
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(stdout_handler)
root_logger.setLevel(logging.DEBUG)

if __name__ == "__main__":

    tensorboard_use = True
    # Get the network symbol
    # Try different ResNet depths (18, 50, 101) to find the best one
    sym = get_resnet_model('pretrained_models/resnet-34', 0)
    _, args_params, aux_params = mx.model.load_checkpoint('pretrained_models/resnet-34', 0)

    # Get the input iterators
    # Point these at the .rec files you created and adjust batch_size as needed
    train_data = get_iterator(path='DATA_rec/cat.rec',
                              data_shape=(3, 224, 224),
                              label_width=7 * 7 * 5,
                              batch_size=32,
                              shuffle=True)
    val_data = get_iterator(path='DATA_rec/cat.rec',
                            data_shape=(3, 224, 224),
                            label_width=7 * 7 * 5,
                            batch_size=32)
    
    # Assign the symbol to a device (GPU/CPU memory is allocated at bind time)
    mod = mx.mod.Module(symbol=sym, context=mx.gpu(0))

    # Custom metric definition
    def loss_metric(label, pred):
        """
        label: np.array of shape (batch_size, 7, 7, 5)
        pred:  same shape as label
        """
        label = label.reshape((-1, 7, 7, 5))
        pred = pred.reshape((-1, 7, 7, 5))
        # Shift tanh-range predictions from [-1, 1] into [0, 1]
        pred_shift = (pred + 1) / 2
        # Channel 0 of the label is the object-confidence channel
        cl = label[:, :, :, 0]
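    # Hedged sketch: register the custom metric the same way the other
    # examples do; allow_extra_outputs lets the network emit more outputs
    # than the metric consumes:
    metric = mx.metric.create(loss_metric, allow_extra_outputs=True)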
Example #4
import logging
import sys
import time

import mxnet as mx

# get_resnet_model_YoloV1 / get_iterator / LogMetricsCallback are
# project-local helpers; their imports are omitted in the original snippet.

# Send all log output to stdout at DEBUG level
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(stdout_handler)
root_logger.setLevel(logging.DEBUG)

if __name__ == "__main__":
    # Get the network symbol
    # Try different ResNet depths (18, 50, 101) to find the best one
    sym = get_resnet_model_YoloV1('pretrained_models/resnet-34', 0)
    #_, args_params, aux_params = mx.model.load_checkpoint('pretrained_models/resnet-34', 0)
    _, args_params, aux_params = mx.model.load_checkpoint(
        'models/drive_detect', 200)
    # Get the input iterators
    # Point these at the .rec files you created and adjust batch_size as needed
    train_data = get_iterator(path='DATA_rec/drive_small.rec',
                              data_shape=(3, 224, 224),
                              label_width=7 * 7 * 9,
                              batch_size=2,
                              shuffle=True)
    val_data = get_iterator(path='DATA_rec/drive_small.rec',
                            data_shape=(3, 224, 224),
                            label_width=7 * 7 * 9,
                            batch_size=2)

    # Assign the symbol to a device (GPU/CPU memory is allocated at bind time)
    mod = mx.mod.Module(symbol=sym, context=mx.cpu(0))

    # setup metric
    # metric = mx.metric.create(loss_metric, allow_extra_outputs=True)
    # Timestamp the log directories so each run gets its own set of logs
    tme = time.time()
    logtrain = LogMetricsCallback('logs/train_' + str(tme))
    logtest = LogMetricsCallback('logs/val_' + str(tme))
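    # Hedged sketch of wiring these callbacks into training; the optimizer,
    # epoch count, and eval_metric below are illustrative assumptions, not
    # from the original code:
    mod.fit(train_data,
            eval_data=val_data,
            eval_metric=mx.metric.Loss(),    # assumes the net outputs a loss
            optimizer='adam',
            optimizer_params={'learning_rate': 1e-4},
            num_epoch=10,
            arg_params=args_params,
            aux_params=aux_params,
            allow_missing=True,              # pretrained backbone + new head
            batch_end_callback=[logtrain],   # stream training metrics
            eval_end_callback=[logtest])     # stream validation metrics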