Example #1
    sys.stdout.flush()

    mean_file = os.path.join(configuration.get_data_dir(), "mean.dat")
    shape_file = os.path.join(configuration.get_data_dir(), "shape.dat")
    # read the input shape and the mean image stored as raw binary files
    input_shape = np.fromfile(shape_file, dtype=np.int32)
    mean_image = np.fromfile(mean_file, dtype=np.float32)
    mean_image = np.reshape(mean_image, input_shape)
    number_of_classes = configuration.get_number_of_classes()
    # loading tfrecords into a dataset object
    if pargs.mode == "train":
        tr_dataset = tf.data.TFRecordDataset(tfr_train_file)
        tr_dataset = tr_dataset.map(
            lambda x: data.parser_tfrecord(x,
                                           input_shape,
                                           mean_image,
                                           number_of_classes,
                                           with_augmentation=True))
        tr_dataset = tr_dataset.shuffle(configuration.get_shuffle_size())
        tr_dataset = tr_dataset.batch(
            batch_size=configuration.get_batch_size())

    if pargs.mode == "train" or pargs.mode == "test":
        val_dataset = tf.data.TFRecordDataset(tfr_test_file)
        val_dataset = val_dataset.map(
            lambda x: data.parser_tfrecord(x,
                                           input_shape,
                                           mean_image,
                                           number_of_classes,
                                           with_augmentation=False))
        val_dataset = val_dataset.batch(
            batch_size=configuration.get_batch_size())
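Both "shape.dat" and "mean.dat" are read with np.fromfile, which expects raw binary buffers rather than .npy files. Below is a minimal sketch of how such files could have been produced; the function name save_mean_and_shape and the images array are hypothetical, only the file names and dtypes come from the example above.

import os
import numpy as np

def save_mean_and_shape(images, data_dir):
    # images: float32 array of shape (N, H, W, C) holding the training images
    input_shape = np.asarray(images.shape[1:], dtype=np.int32)   # (H, W, C)
    mean_image = images.mean(axis=0).astype(np.float32)
    # tofile() writes the raw buffer, which is exactly what np.fromfile reads back
    input_shape.tofile(os.path.join(data_dir, "shape.dat"))
    mean_image.tofile(os.path.join(data_dir, "mean.dat"))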
Example #2
    configuration = conf.ConfigurationFile(configuration_file, pargs.name)
    #parser_tf_record
    #/home/vision/smb-datasets/MNIST-5000/ConvNet2.0/
    tfr_train_file = os.path.join(configuration.get_data_dir(), pargs.file)

    mean_file = os.path.join(configuration.get_data_dir(), "mean.dat")
    shape_file = os.path.join(configuration.get_data_dir(), "shape.dat")
    #
    input_shape = np.fromfile(shape_file, dtype=np.int32)
    mean_image = np.fromfile(mean_file, dtype=np.float32)
    mean_image = np.reshape(mean_image, input_shape)

    number_of_classes = configuration.get_number_of_classes()

    tr_dataset = tf.data.TFRecordDataset(tfr_train_file)
    tr_dataset = tr_dataset.map(lambda x: data.parser_tfrecord(
        x, input_shape, 0, number_of_classes, False))
    tr_dataset = tr_dataset.shuffle(configuration.get_shuffle_size())
    tr_dataset = tr_dataset.batch(batch_size=configuration.get_batch_size())

    n_rows = 8
    n_cols = 8
    fig, xs = plt.subplots(n_rows, n_cols)
    for i in range(n_rows):
        for j in range(n_cols):
            xs[i, j].set_axis_off()
    for image, label in tr_dataset:
        for i in range(64):
            row = int(i / n_cols)
            col = i % n_cols
            im = image[i]
            #im = 255 * (image[i] - np.min(image[i]))/ (np.max(image[i])-np.min(image[i]))
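Every example maps data.parser_tfrecord over the raw records, but its body is not shown here. The sketch below shows what such a parser typically looks like for records that store a raw image plus an integer label; the feature keys, the augmentation step, and the exact signature are assumptions, not the project's actual implementation. Note that in this example the mean argument is passed as 0 and augmentation is disabled, so the images are visualized essentially as stored.

import tensorflow as tf

def parser_tfrecord_sketch(serialized, input_shape, mean_image,
                           number_of_classes, with_augmentation=False):
    # Assumed feature keys; they must match the keys used when the TFRecords were written.
    features = tf.io.parse_single_example(
        serialized,
        {'image': tf.io.FixedLenFeature([], tf.string),
         'label': tf.io.FixedLenFeature([], tf.int64)})
    image = tf.io.decode_raw(features['image'], tf.uint8)
    image = tf.reshape(image, input_shape)
    image = tf.cast(image, tf.float32) - mean_image      # mean subtraction
    if with_augmentation:
        image = tf.image.random_flip_left_right(image)   # example augmentation
    label = tf.one_hot(features['label'], number_of_classes)
    return image, label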
Example #3
            tfr_train_file = [os.path.join(configuration.get_data_dir(),
                                           "train_{}.tfrecords".format(idx))
                              for idx in range(configuration.get_num_threads())]
        if pargs.mode == 'train' or pargs.mode == 'test':
            tfr_test_file = [os.path.join(configuration.get_data_dir(),
                                          "test_{}.tfrecords".format(idx))
                             for idx in range(configuration.get_num_threads())]
    sys.stdout.flush()

    mean_file = os.path.join(configuration.get_data_dir(), "mean.dat")
    shape_file = os.path.join(configuration.get_data_dir(), "shape.dat")
    #
    input_shape = np.fromfile(shape_file, dtype=np.int32)
    mean_image = np.fromfile(mean_file, dtype=np.float32)
    mean_image = np.reshape(mean_image, input_shape)
    number_of_classes = configuration.get_number_of_classes()
    # loading tfrecords into a dataset object
    if pargs.mode == 'train':
        tr_dataset = tf.data.TFRecordDataset(tfr_train_file)
        tr_dataset = tr_dataset.map(
            lambda x: data.parser_tfrecord(x, input_shape, mean_image,
                                           number_of_classes,
                                           with_augmentation=True))
        tr_dataset = tr_dataset.shuffle(configuration.get_shuffle_size())
        tr_dataset = tr_dataset.batch(batch_size=configuration.get_batch_size())

    if pargs.mode == 'train' or pargs.mode == 'test':
        val_dataset = tf.data.TFRecordDataset(tfr_test_file)
        val_dataset = val_dataset.map(
            lambda x: data.parser_tfrecord(x, input_shape, mean_image,
                                           number_of_classes,
                                           with_augmentation=False))
        val_dataset = val_dataset.batch(batch_size=configuration.get_batch_size())

    # Define a callback that saves checkpoints during training.
    # save_freq: how often the checkpoint is written, expressed as a number of steps.
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(configuration.get_snapshot_dir(), '{epoch:03d}.h5'),
        save_weights_only=True,
        mode='max',
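The ModelCheckpoint call above is cut off; with mode='max' it would normally also name the metric being monitored and then be handed to fit. A hedged sketch of a complete version follows: the monitored metric, save_freq value, and the model/fit arguments (including the get_number_of_epochs accessor) are assumptions.

    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(configuration.get_snapshot_dir(), '{epoch:03d}.h5'),
        save_weights_only=True,
        monitor='val_accuracy',   # assumed metric; 'max' keeps the largest value as "best"
        mode='max',
        save_freq='epoch')        # or an integer step count, as the comment above suggests

    # Typical wiring into training (sketch):
    history = model.fit(tr_dataset,
                        epochs=configuration.get_number_of_epochs(),  # assumed accessor
                        validation_data=val_dataset,
                        callbacks=[model_checkpoint_callback])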
Example #4
    configuration_file = pargs.config
    configuration = conf.ConfigurationFile(configuration_file, pargs.name)
    #parser_tf_record
    #/home/vision/smb-datasets/MNIST-5000/CC7221-HW2.0/
    tfr_train_file = os.path.join(configuration.get_data_dir(), pargs.file)

    mean_file = os.path.join(configuration.get_data_dir(), "mean.dat")
    shape_file = os.path.join(configuration.get_data_dir(), "shape.dat")
    #
    input_shape = np.fromfile(shape_file, dtype=np.int32)
    mean_image = np.fromfile(mean_file, dtype=np.float32)
    mean_image = np.reshape(mean_image, input_shape)

    number_of_classes = configuration.get_number_of_classes()

    tr_dataset = tf.data.TFRecordDataset(tfr_train_file)
    tr_dataset = tr_dataset.map(lambda x: data.parser_tfrecord(
        x, input_shape, mean_image, number_of_classes, 'test'))
    tr_dataset = tr_dataset.shuffle(configuration.get_shuffle_size())
    tr_dataset = tr_dataset.batch(batch_size=configuration.get_batch_size())

    fig, xs = plt.subplots(2, 5)
    for image, label in tr_dataset:
        for i in range(10):
            row = int(i / 5)
            col = i % 5
            im = 255 * (image[i] - np.min(image[i])) / (np.max(image[i]) -
                                                        np.min(image[i]))
            xs[row, col].imshow(np.uint8(im), cmap='gray')
        plt.pause(1)
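As written, the display loop walks over every batch of tr_dataset, pausing one second per batch and redrawing the same 2x5 grid. If only a quick preview is wanted, Dataset.take(1) restricts the iteration to a single batch; the sketch below keeps the same min-max rescaling to 0-255 used above.

    # Preview a single batch instead of iterating over the whole dataset.
    for image, label in tr_dataset.take(1):
        for i in range(10):
            row, col = divmod(i, 5)
            im = 255 * (image[i] - np.min(image[i])) / (np.max(image[i]) - np.min(image[i]))
            xs[row, col].imshow(np.uint8(im), cmap='gray')
    plt.show()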