def __init__(self):
     """Init global variables for contextual circuit bp."""
     self.name = 'contextual_model_stimuli'
     self.figure_name = 'f3a'
     self.config = Config()
     self.output_size = [10, 1]
     self.im_size = (51, 51, 10)
     self.model_input_image_size = [51, 51, 10]
     self.default_loss_function = 'pearson'
     self.score_metric = 'pearson'
     self.preprocess = [None]
     self.folds = {
         'train': 'train',
         'test': 'test'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.float_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(
             dtype='float',
             length=self.output_size[0])
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': None
         }
     }
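# --- Hedged sketch (not part of the original repo) -------------------------
# Illustrates how the tf_dict / tf_reader pair above could be consumed to
# decode one serialized record, assuming tf_fun.fixed_len_feature wraps
# tf.FixedLenFeature and the 'image' entry stores raw float32 bytes
# (TF1-style API). The helper name `parse_contextual_record` is hypothetical.
import tensorflow as tf


def parse_contextual_record(serialized_example):
    """Decode one contextual_model_stimuli example under the assumptions above."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([10], tf.float32),
        })
    # Recover the stimulus volume from its raw byte string and restore shape.
    image = tf.reshape(
        tf.decode_raw(features['image'], tf.float32), [51, 51, 10])
    return image, features['label']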
 def __init__(self):
     self.name = 'cifar_10'
     self.extension = '.png'
     self.config = Config()
     self.output_size = [10, 1]
     self.im_size = [32, 32, 3]
     self.model_input_image_size = [32, 32, 3]
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.preprocess = [None]
     self.shuffle = True  # Preshuffle data?
     self.folds = {'train': 'train', 'test': 'test'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': None
         }
     }
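# --- Hedged sketch (not part of the original repo) -------------------------
# One way the `targets` mapping above could be used on the write side,
# assuming tf_fun.bytes_feature / tf_fun.int64_feature wrap the standard
# tf.train.Feature constructors and images are stored as raw float32 bytes.
# `encode_cifar_example` is a hypothetical helper name.
import numpy as np
import tensorflow as tf


def encode_cifar_example(image, label):
    """Serialize one float32 [32, 32, 3] image and its integer class label."""
    feature = {
        'image': tf.train.Feature(
            bytes_list=tf.train.BytesList(
                value=[image.astype(np.float32).tobytes()])),
        'label': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[int(label)])),
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    return example.SerializeToString()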
 def __init__(self):
     self.name = 'coco_2014'
     self.aux_dir = 'coco_images'
     self.extension = '.jpg'
     self.config = Config()
     self.output_size = [89, 1]
     self.im_size = [256, 256, 3]
     self.model_input_image_size = [224, 224, 3]
     self.default_loss_function = 'sigmoid'
     self.image_meta_file = '_annotations.npy'
     self.score_metric = 'f1'
     self.preprocess = ['pad_resize']
     self.shuffle = False  # Preshuffle data?
     self.folds = {'train': 'train2014', 'val': 'val2014'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image':
         tf_fun.fixed_len_feature(dtype='string'),
         'label':
         tf_fun.fixed_len_feature(length=self.output_size[0], dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': None
         }
     }
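# --- Hedged sketch (not part of the original repo) -------------------------
# The 89-way output with a sigmoid loss above implies a multi-hot label per
# image. A minimal way to build one from a list of category indices; the
# category-to-index mapping itself is assumed, not taken from this repo.
import numpy as np


def multi_hot(category_indices, num_classes=89):
    """Return an int64 multi-hot vector with ones at the given class indices."""
    label = np.zeros(num_classes, dtype=np.int64)
    label[np.asarray(category_indices, dtype=np.int64)] = 1
    return label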
    def __init__(self,
                 cell_id_list,
                 aux_targets,
                 aux_tf_dict,
                 output_size=[1],
                 im_size=None):
        """Allen cell tfrecord global variable init."""
        self.name = 'allen_cell_%s' % cell_id_list
        self.config = Config()
        self.folds = {'train': 'training', 'test': 'testing'}
        self.targets = {
            'image': tf_fun.bytes_feature,
            'f': tf_fun.float_feature
        }
        self.tf_dict = {
            'image': tf_fun.fixed_len_feature(dtype='string'),
            'f': tf_fun.fixed_len_feature(dtype='float')
        }
        # Add aux data: merge the aux feature writers and parsers into the
        # base dicts (avoids relying on two dicts sharing iteration order).
        for tk, tv in aux_targets.items():
            self.targets[tk] = tv
        for dk, dv in aux_tf_dict.items():
            self.tf_dict[dk] = dv

        self.output_size = output_size
        self.im_size = im_size
        self.preprocess = [None]
        self.shuffle = False  # Preshuffle data?
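# --- Hedged sketch (not part of the original repo) -------------------------
# Example aux_targets / aux_tf_dict arguments that the constructor above would
# merge into self.targets and self.tf_dict. The 'stimulus' key and the class
# name mentioned in the comment are hypothetical; only the tf_fun helpers used
# elsewhere in this file are assumed.
example_aux_targets = {
    'stimulus': tf_fun.bytes_feature,
}
example_aux_tf_dict = {
    'stimulus': tf_fun.fixed_len_feature(dtype='string'),
}
# e.g. AllenCellData(cell_id_list=[12345],
#                    aux_targets=example_aux_targets,
#                    aux_tf_dict=example_aux_tf_dict)  # class name assumed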
 def __init__(self):
     self.name = 'spikefinder'
     self.config = Config()
     self.file_extension = '.csv'
     self.timepoints = 5  # Data is 100hz
     self.trim_nans = True
     self.output_size = [1, self.timepoints]
     self.im_size = [1, self.timepoints]
     self.model_input_image_size = [1, self.timepoints]
     self.default_loss_function = 'l2'
     self.score_metric = 'l2'
     self.preprocess = [None]
     self.folds = {'train': 'train', 'test': 'test'}
     self.targets = {
         'image': tf_fun.float_feature,
         'label': tf_fun.float_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='float'),
         'label': tf_fun.fixed_len_feature(dtype='float')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': None
         },
         'label': {
             'dtype': tf.float32,
             'reshape': None
         }
     }
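# --- Hedged sketch (not part of the original repo) -------------------------
# A guess at how trim_nans and the 5-sample `timepoints` setting above could
# be applied to a raw 100 Hz trace before serialization; the chunking scheme
# is an assumption, not the repo's implementation.
import numpy as np


def trim_and_chunk(trace, timepoints=5):
    """Drop NaNs, then split the trace into consecutive windows of `timepoints` samples."""
    trace = np.asarray(trace, dtype=np.float32)
    trace = trace[~np.isnan(trace)]
    n_chunks = len(trace) // timepoints
    return trace[:n_chunks * timepoints].reshape(n_chunks, timepoints)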
    def __init__(self):
        self.name = 'crcns_1d_2nd_dff'
        self.config = Config()
        self.file_extension = '.csv'
        self.timepoints = 10  # Data is 100hz
        self.output_size = [2]
        self.im_size = [self.timepoints, 1]
        self.model_input_image_size = [self.timepoints, 1]
        self.default_loss_function = 'sigmoid_logits'
        self.score_metric = 'argmax_softmax_accuracy'
        self.fix_imbalance_train = True
        self.fix_imbalance_test = False
        self.preprocess = [None]
        self.train_prop = 0.80
        self.binarize_spikes = True
        self.df_f_window = 10
        self.use_df_f = True
        self.save_pickle = True
        self.shuffle_train = True
        self.shuffle_test = False
        self.pickle_name = 'cell_3_dff.p'
        self.overwrite_numpy = False
        self.validation_slice = 2

        # CRCNS data pointers
        self.crcns_dataset = os.path.join(
            self.config.data_root,
            'crcns',
            'cai-1')
        self.exp_name = 'GCaMP6s_9cells_Chen2013'
        self.numpy = os.path.join(
            self.crcns_dataset,
            '%s_1d.npz' % self.exp_name)

        # CC-BP dataset vars
        self.folds = {
            'train': 'train',
            'test': 'test'}
        self.targets = {
            'image': tf_fun.float_feature,
            'label': tf_fun.int64_feature
        }
        self.tf_dict = {
            'image': tf_fun.fixed_len_feature(
                length=self.im_size,
                dtype='float'),
            'label': tf_fun.fixed_len_feature(
                length=self.output_size[-1], dtype='int64')
        }
        self.tf_reader = {
            'image': {
                'dtype': tf.float32,
                'reshape': self.im_size
            },
            'label': {
                'dtype': tf.int64,
                'reshape': self.output_size
            }
        }
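# --- Hedged sketch (not part of the original repo) -------------------------
# A minimal guess at how df_f_window, use_df_f and binarize_spikes above might
# be applied to a raw fluorescence trace and spike counts before writing
# tfrecords. The trailing-window definition of dF/F is an assumption.
import numpy as np


def df_f(trace, window=10):
    """dF/F with a trailing-window mean as the baseline F0."""
    trace = np.asarray(trace, dtype=np.float32)
    baseline = np.array(
        [trace[max(0, t - window):t + 1].mean() for t in range(len(trace))],
        dtype=np.float32)
    return (trace - baseline) / (baseline + 1e-8)


def binarize(spike_counts):
    """Collapse spike counts to a 0/1 event label per timepoint."""
    return (np.asarray(spike_counts) > 0).astype(np.int64)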
 def __init__(self):
     self.name = 'cluttered_nist_2_ix1_full_semantic'
     self.output_name = 'cluttered_nist_2_ix1_full_semantic'
     self.data_name = 'ix1'
     self.img_dir = 'imgs'
     self.contour_dir = '/media/data_cifs/cluttered_nist2_plus/'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [350, 350]  # 600, 600
     self.model_input_image_size = [160, 160, 1]  # [107, 160, 3]
     self.nhot_size = [26]
     self.max_ims = 20000000
     self.output_size = {'output': 1, 'aux': self.nhot_size[0]}
     self.label_size = [1]
     self.default_loss_function = 'cce'
     self.aux_loss = {'nhot': ['bce', 1.]}  # Loss type and scale
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['resize']  # ['resize_nn']
     self.meta = os.path.join('metadata', 'combined.npy')
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'nhot': tf_fun.float_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'nhot': tf_fun.fixed_len_feature(
             dtype='float',
             length=self.nhot_size),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'nhot': {
             'dtype': tf.float32,
             'reshape': self.nhot_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.label_size
         }
     }
    def __init__(self):
        self.name = 'orientation_probe'
        # self.name = 'plaid_surround'
        self.output_name = 'orientation_probe_151_viz'
        self.img_dir = 'imgs'
        # self.contour_dir = '/media/data_cifs/cluster_projects/refactor_gammanet/plaid_surround'
        self.contour_dir = '/media/data_cifs_lrs/projects/prj_neural_circuits/refactor_gammanet/{}'.format(
            self.name)
        # self.perturb_a = "perturb_viz/gammanet_full_plaid_surround_outputs_data.npy"
        self.perturb_a = "perturb_viz/bsds_tcs.npy"
        self.perturb_b = "perturb_viz/0.npz"  # bsds_tcs.npy"

        self.im_extension = '.png'
        self.label_regex = r'(?<=length)\d+'
        self.config = Config()
        self.im_size = [320, 480, 3]  # [500, 500]  # 600, 600
        self.model_input_image_size = [320, 480, 3]  # [107, 160, 3]
        # self.output_size = [112, 112, 128]  # [320, 480, 1]  # [321, 481, 1]
        self.output_size = [1, 2048 + 6]  # [320, 480, 1]  # [321, 481, 1]
        self.max_ims = 100000
        self.label_size = self.output_size
        self.default_loss_function = 'l2'
        self.store_z = False
        self.normalize_im = False
        self.all_flips = True
        self.shuffle = True
        self.input_normalization = 'none'  # 'zscore'
        self.preprocess = []  # ['resize']  # ['resize_nn']
        self.meta = os.path.join('metadata', '1.npy')
        self.folds = {
            'train': 'train',
            'val': 'val',
            'test': 'test',
        }
        self.cv_split = 0.1
        self.targets = {
            'image': tf_fun.bytes_feature,
            'label': tf_fun.float_feature
        }
        self.tf_dict = {
            'image':
            tf_fun.fixed_len_feature(dtype='string'),
            'label':
            tf_fun.fixed_len_feature(dtype='float32', length=self.output_size)
        }
        self.tf_reader = {
            'image': {
                'dtype': tf.float32,
                'reshape': self.im_size
            },
            'label': {
                'dtype': tf.float32,
                'reshape': self.output_size
            }
        }
 def __init__(self):
     self.name = 'BSDS500'
     self.output_name = 'BSDS500_100'
     self.im_extension = '.jpg'
     self.lab_extension = '.mat'
     self.images_dir = 'images'
     self.labels_dir = 'groundTruth'
     self.processed_labels = 'processed_labels'
     self.processed_images = 'processed_images'
     self.config = Config()
     self.train_size = int(200 * 1)
     self.im_size = [321, 481, 3]  # [321, 481, 3]
     # self.model_input_image_size = [196, 196, 3]
     # self.model_input_image_size = [320, 480, 3]  # [224, 224, 3]
     self.model_input_image_size = [320, 320, 3]  # [224, 224, 3]
     self.val_model_input_image_size = [320, 480, 3]
     # self.model_input_image_size = [160, 240, 3]  # [224, 224, 3]
     # self.val_model_input_image_size = [160, 240, 3]
     # self.model_input_image_size = [80, 160, 3]  # [224, 224, 3]
     # self.val_model_input_image_size = [80, 160, 3]
     self.output_size = [321, 481, 1]  # [321, 481, 1]
     self.label_size = self.output_size
     self.default_loss_function = 'pearson'
     self.score_metric = 'sigmoid_accuracy'
     self.aux_scores = ['f1']
     self.store_z = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = [None]  # Preprocessing before tfrecords
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.fold_options = {
         'train': 'mean',
         'val': 'mean'
     }
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.output_name = 'BSDS500_100_hed'
     self.im_extension = '.jpg'
     self.lab_extension = '.mat'
     self.images_dir = '/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/train'
     self.val_images_dir = '/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/val'
     self.processed_labels = 'processed_labels'
     self.processed_images = 'processed_images'
     self.config = Config()
     self.train_size = int(1000 * 1)
     self.im_size = [320, 320, 3]  # [321, 481, 3]
     self.model_input_image_size = [320, 320, 3]  # [224, 224, 3]
     self.val_model_input_image_size = [320, 320, 3]
     self.output_size = [320, 320, 1]  # [321, 481, 1]
     self.label_size = self.output_size
     self.default_loss_function = 'pearson'
     self.score_metric = 'sigmoid_accuracy'
     self.aux_scores = ['f1']
     self.store_z = False
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['hed_pad']  # Preprocessing before tfrecords
     self.folds = {'train': 'train', 'val': 'val'}
     self.fold_options = {'train': 'mean', 'val': 'mean'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature,
         'height': tf_fun.int64_feature,
         'width': tf_fun.int64_feature,
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string'),
         'height': tf_fun.fixed_len_feature(dtype='int64'),
         'width': tf_fun.fixed_len_feature(dtype='int64'),
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         },
         'height': {
             'dtype': tf.int64,
             'reshape': []
         },
         'width': {
             'dtype': tf.int64,
             'reshape': []
         },
     }
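# --- Hedged sketch (not part of the original repo) -------------------------
# Because this dataset also stores per-example 'height' and 'width', a reader
# can recover the crop shape from those fields rather than hard-coding it.
# Assumes images/labels were written as raw float32 bytes (TF1-style API);
# `parse_hed_record` is a hypothetical helper name.
import tensorflow as tf


def parse_hed_record(serialized_example):
    """Decode one BSDS500_100_hed example using its stored height/width."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.string),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
        })
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    image = tf.reshape(
        tf.decode_raw(features['image'], tf.float32),
        tf.stack([height, width, 3]))
    label = tf.reshape(
        tf.decode_raw(features['label'], tf.float32),
        tf.stack([height, width, 1]))
    return image, label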
 def __init__(self):
     self.name = 'new_LMD_512_egfr'
     self.output_name = 'new_LMD_512_egfr'
     self.kras_dir = '/media/data_cifs/andreas/pathology/2018-04-26/mar2019/LMD/4-03-2019/512_npys'
     self.im_extension = '.npy'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [512, 512, 3]  # 600, 600
     self.model_input_image_size = [224, 224, 3]  # [480, 480, 3]  # [107, 160, 3]
     self.max_ims = 125000
     self.output_size = [4]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.balance = True
     self.shuffle = True
     self.calculate_moments = False
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['rgba2rgb']  # ['to_float32', 'crop_center']  # 'exclude_white'
     self.LMD = ['3361805']
     self.val_set = self.LMD  # self.non_lung_cases_new  # self.non_lung_kras_cases_2017 + self.non_lung_non_kras_cases_2017  #  + self.non_lung_cases_new
     self.folds = {'train': 'train', 'val': 'val', 'test': 'test'}
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.float_feature
     }
     self.tf_dict = {
         'image':
         tf_fun.fixed_len_feature(dtype='string'),
         'label':
         tf_fun.fixed_len_feature(dtype='float32',
                                  length=self.output_size[0])
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'cube_plus'
     self.output_name = 'cube_plus'
     self.main_dir = '/media/data_cifs/image_datasets/cube_plus'
     self.image_dir = os.path.join(self.main_dir, 'images')
     self.label_file = os.path.join(self.main_dir, 'cube+_gt.txt')
     self.im_extension = '.PNG'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [1732, 2601, 3]  # 600, 600
     self.model_input_image_size = [288, 433, 3]  # [107, 160, 3]
     self.max_ims = 125000
     self.output_size = [3]
     self.label_size = self.output_size
     self.default_loss_function = 'bce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.balance = True
     self.shuffle = True
     self.calculate_moments = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = []
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.1
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.float_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(
             dtype='float32',
             length=self.output_size[0])
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'shape_connectome_lumcontrast'
     self.output_name = 'shape_connectome_lumcontrast'
     self.data_name = 'lumcontrast'
     self.img_dir = 'imgs'
     self.contour_dir = '/media/data_cifs/synth/synth_connectomics/'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [350, 350]  # 600, 600
     self.model_input_image_size = [320, 320, 1]  # [107, 160, 3]
     self.nhot_size = [26]
     self.max_ims = 200
     self.output_size = {'output': 2, 'aux': self.nhot_size[0]}
     self.label_size = self.im_size
     self.default_loss_function = 'cce'
     # self.aux_loss = {'nhot': ['bce', 1.]}  # Loss type and scale
     self.score_metric = 'prop_positives'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['resize']  # ['resize_nn']
     self.meta = os.path.join('metadata', 'combined.npy')
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.05
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.label_size
         }
     }
 def __init__(self):
     self.name = 'curv_contour_length_9_full'
     self.output_name = 'curv_contour_length_9_full'
     self.data_name = 'curv_contour_length_9'
     self.contour_dir = '/media/data_cifs/curvy_2snakes_300/'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [300, 300]  # 600, 600
     self.model_input_image_size = [160, 160, 1]  # [107, 160, 3]
     self.max_ims = 125000
     self.output_size = [1]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.balance = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['']  # ['resize_nn']
     self.meta = os.path.join('metadata', 'combined.npy')
     self.negative = 'curv_contour_length_9_neg'
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.output_size
         }
     }
    def __init__(self):
        self.name = 'crcns_2d'
        self.config = Config()
        self.file_extension = '.csv'
        self.timepoints = 10  # Data is 100hz
        self.output_size = [1, 1]
        self.im_size = [self.timepoints, 256, 256, 1]
        self.model_input_image_size = [128, 128, 1]
        self.default_loss_function = 'sigmoid_logits'
        self.score_metric = 'argmax_softmax_accuracy'
        self.fix_imbalance = True
        self.preprocess = ['resize']
        self.train_prop = 0.80
        self.binarize_spikes = True
        self.df_f_window = 10
        self.use_df_f = False
        self.shuffle = True

        # CRCNS data pointers
        self.crcns_dataset = os.path.join(
            self.config.data_root,
            'crcns',
            'cai-1')
        self.exp_name = 'GCaMP6s_9cells_Chen2013'

        # CC-BP dataset vars
        self.folds = {
            'train': 'train',
            'test': 'test'
        }
        self.targets = {
            'image': tf_fun.bytes_feature,
            'label': tf_fun.float_feature
        }
        self.tf_dict = {
            'image': tf_fun.fixed_len_feature(dtype='string'),
            'label': tf_fun.fixed_len_feature(dtype='float')
        }
        self.tf_reader = {
            'image': {
                'dtype': tf.float16,
                'reshape': self.im_size
            },
            'label': {
                'dtype': tf.float32,
                'reshape': None
            }
        }
 def __init__(self):
     self.name = 'berson_005'
     self.output_name = 'berson_005'
     self.contour_dir = '/media/data_cifs/connectomics/datasets/berson_0.npz'
     self.config = Config()
     self.affinity = False
     self.im_size = [384, 384]  # 600, 600
     self.model_input_image_size = [384, 384, 1]  # [107, 160, 3]
     self.nhot_size = [26]
     self.max_ims = 222
     self.output_size = {'output': 1, 'aux': self.nhot_size[0]}
     self.label_size = self.im_size + [1]
     self.default_loss_function = 'cce'
     # self.aux_loss = {'nhot': ['bce', 1.]}  # Loss type and scale
     self.score_metric = 'prop_positives'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.meta = os.path.join('metadata', 'combined.npy')
     self.folds = {
         'train': 'train',
         'val': 'val',
         'test': 'test',
     }
     self.train_split = int(307 * .05)
     self.val_split = 307
     self.train_size = 1280
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.label_size
         }
     }
 def __init__(self):
     self.name = 'gratings_undo_bias_main'
     self.output_name = 'gratings_undo_bias_main'
     self.img_dir = 'imgs'
     self.contour_dir = '/media/data_cifs/tilt_illusion'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [500, 500]  # 600, 600
     self.model_input_image_size = [224, 224, 1]  # [107, 160, 3]
     self.output_size = [2]
     self.max_ims = 100000
     self.label_size = self.output_size
     self.default_loss_function = 'l2'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = []  # ['resize']  # ['resize_nn']
     self.meta = os.path.join('test', 'metadata', 'filtered_te.npy')
     self.folds = {
         'train': 'train',
         'val': 'val',
         'test': 'test',
     }
     self.cv_split = 0.1
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.float_feature
     }
     self.tf_dict = {
         'image':
         tf_fun.fixed_len_feature(dtype='string'),
         'label':
         tf_fun.fixed_len_feature(dtype='float32', length=self.output_size)
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'seg_cluttered_nist_3_baseline_50k'
     self.output_name = 'seg_cluttered_nist_3_baseline_50k'
     self.data_name = 'baseline'
     self.img_dir = 'imgs'
     self.contour_dir = '/media/data_cifs/cluttered_nist3_seg/'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [350, 350]  # 600, 600
     self.model_input_image_size = [160, 160, 1]  # [107, 160, 3]
     self.max_ims = 50000
     self.output_size = self.im_size + [2]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['resize', 'trim_extra_dims']  # ['resize_nn']
     self.meta = os.path.join('metadata', 'combined.npy')
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.im_size
         }
     }
 def __init__(self):
     self.name = 'new_LMD_whole'
     self.output_name = 'new_LMD_whole'
     self.kras_dir = '/media/data_cifs/pathology/molecular_sahar/LMD/imgs'
     self.im_extension = '.jpg'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [2028, 2028, 3]  # 600, 600
     self.model_input_image_size = [2028, 2028, 3]
     self.max_ims = 125000
     self.output_size = [1]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.balance = True
     self.shuffle = True
     self.calculate_moments = False
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['rgba2rgb', 'macenko']
     self.LMD = ['3361805']
     self.val_set = self.LMD
     self.folds = {'train': 'train', 'val': 'val', 'test': 'test'}
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'ilsvrc12'
     self.output_name = 'ilsvrc12'
     self.img_dir = 'imgs'
     self.contour_dir = '/media/data_cifs/clicktionary/webapp_data'
     self.train_dir = 'lmdb_trains'
     self.val_dir = 'lmdb_validations'
     self.im_extension = '.JPEG'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [256, 256, 3]  # 600, 600
     self.model_input_image_size = [224, 224, 3]  # [107, 160, 3]
     self.max_ims = np.inf
     self.output_size = []
     self.force_output_size = [1000]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['resize']  # ['resize_nn']
     self.meta = os.path.join('metadata', 'combined.npy')
     self.folds = {'train': 'train', 'val': 'val'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'BSDS500_2'
     self.orig_name = 'BSDS500'
     self.im_extension = '.jpg'
     self.lab_extension = '.mat'
     self.images_dir = 'images'
     self.labels_dir = 'groundTruth'
     self.processed_labels = 'processed_labels'
     self.processed_images = 'processed_images'
     self.config = Config()
     self.im_size = [321, 481, 3]
     self.model_input_image_size = [107, 160, 3]  # [150, 240, 3]
     self.output_size = [321, 481, 1]  # 256 x 256 pixels, 7 x 7 angle
     self.label_size = self.output_size
     self.default_loss_function = 'pearson'
     self.score_metric = 'pearson'
     self.aux_scores = ['f1']
     self.preprocess = [None]  # ['resize_nn']
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.fold_options = {
         'train': 'mean',
         'val': 'duplicate'
     }
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'baseline'
     self.data_name = 'baseline'
     self.output_name = 'cluttered_nist_caps_baseline'
     self.dataset_dir = '/media/data_cifs/cluttered_nist_caps'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [300, 300]
     self.model_input_image_size = [256, 256, 1]
     self.output_size = [1]
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['']  # ['resize_nn']
     self.meta = os.path.join('metadata', 'combined.npy')
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'shapes'
     self.output_name = 'shapes_held_out_light'
     self.image_dir = '/media/data_cifs/image_datasets/'
     self.im_extension = '.png'
     self.label_regex = r'(?<=length)\d'
     self.config = Config()
     self.im_size = [500, 500]
     self.model_input_image_size = [256, 256, 1]  # [107, 160, 3]
     self.max_ims = None
     self.output_size = self.model_input_image_size
     self.label_size = self.output_size
     self.default_loss_function = 'l2'
     self.score_metric = 'pearson'
     self.store_z = False
     self.normalize_im = False
     self.shuffle = True
     self.img_file_ids = ['img1', 'img2', 'img3']
     self.test_im = 'img1'  # Which label are we using for the test set
     self.input_normalization = 'none'  # 'zscore'
     self.label_string = 'slant_ms_200_1'
     self.preprocess = ['']  # ['resize_nn']
     self.folds = {'train': 'train', 'val': 'val'}
     self.cv_split = 0.9
     self.cv_balance = False
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'BSDS_SBD'
     self.dataset_roots = ['BSDS500', 'SBD']
     self.orig_name = 'BSDS_SBD'
     self.im_extension = '.jpg'
     self.lab_extension = '.mat'
     self.images_dir = 'images'
     self.labels_dir = 'groundTruth'
     self.processed_labels = 'processed_labels'
     self.processed_images = 'processed_images'
     self.config = Config()
     self.im_size = [321, 481, 3]
     self.sum_imgs = np.zeros(self.im_size)
     self.lab_size = (321, 481)  # (height, width); opposite of the usual (width, height) size convention, per OpenCV-style array indexing
     self.model_input_image_size = [150, 240, 3]  # [321, 481, 3]  # [107, 160, 3]
     self.output_size = [321, 481, 1]
     self.label_size = self.output_size
     self.default_loss_function = 'pearson'
     self.score_metric = 'pearson'
     self.aux_scores = ['f1']
     self.preprocess = [None]  # ['resize_nn']
     self.folds = {'train': 'train', 'val': 'val'}
     self.fold_options = {'train': 'duplicate', 'val': 'mean'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'multicue_boundaries'
     self.output_name = 'multicue_boundaries'
     self.image_dir = '/media/data_cifs/pytorch_projects/datasets/Multicue_crops/data/images'
     self.im_extension = '.jpg'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [500, 500, 3]  # 600, 600
     self.model_input_image_size = [320, 400, 3]  # [224, 224, 3]
     self.val_model_input_image_size = [320, 400, 3]
     self.output_size = [320, 400, 1]  # [321, 481, 1]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.balance = True
     self.shuffle = True
     self.calculate_moments = False
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = []
     self.folds = {'train': 'train', 'val': 'val', 'test': 'test'}
     self.cv_split = 0.1
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'new_LMD'
     self.output_name = 'new_LMD'
     self.kras_dir = '/media/data_cifs/andreas/pathology/2018-04-26/mar2019/LMD/patch_npys'
     self.im_extension = '.npy'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [230, 230, 3]  # 600, 600
     self.model_input_image_size = [200, 200, 3]  # [107, 160, 3]
     self.output_size = [1]
     self.label_size = self.output_size
     self.default_loss_function = 'bce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = False
     self.all_flips = True
     self.balance = True
     self.shuffle = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = ['rgba2rgb', 'crop_center']  # ['resize_nn']
     self.LMD = ['3361805']
     self.val_set = self.LMD  # self.non_lung_cases_new  # self.non_lung_kras_cases_2017 + self.non_lung_non_kras_cases_2017  #  + self.non_lung_cases_new
     self.folds = {'train': 'train', 'val': 'val'}
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'contours_gilbert_256_sparse_contrast'
     self.im_extension = '.png'
     self.images_dir = 'images'
     self.label_regex = r'(?<=length)\d+'
     self.config = Config()
     self.im_size = [256, 256, 3]  # 600, 600
     self.model_input_image_size = [256, 256, 3]  # [107, 160, 3]
     self.max_ims = 0
     self.output_size = [1]
     self.label_size = self.output_size
     self.default_loss_function = 'cce'
     self.score_metric = 'accuracy'
     self.store_z = False
     self.normalize_im = True
     self.shuffle = True
     self.input_normalization = 'none'
     self.preprocess = ['resize']  # ['resize_nn']
     self.folds = {
         'train': 'train',
         'val': 'val'
     }
     self.cv_split = 0.9
     self.cv_balance = True
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.int64_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='int64')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.int64,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'hed_BSDS500'
     self.output_name = 'hed_BSDS500'
     self.images_dir = '/media/data_cifs/image_datasets/hed_bsds/HED-BSDS'
     self.processed_labels = 'processed_labels'
     self.processed_images = 'processed_images'
     self.config = Config()
     self.im_size = [321, 481, 3]
     # self.model_input_image_size = [196, 196, 3]
     self.model_input_image_size = [320, 320, 3]  # [224, 224, 3]
     self.val_model_input_image_size = [320, 320, 3]
     self.output_size = [321, 481, 1]
     self.label_size = self.output_size
     self.default_loss_function = 'pearson'
     self.score_metric = 'sigmoid_accuracy'
     self.aux_scores = ['f1']
     self.store_z = True
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = [None]  # Preprocessing before tfrecords
     self.folds = {
         'train': 'train',
         'val': 'val',
         'test': 'test',
     }
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.output_name = 'multicue_001_boundaries_jk'
     self.im_extension = '.jpg'
     self.lab_extension = '.mat'
     self.images_dir = '/media/data_cifs/pytorch_projects/datasets/Multicue_crops/data/images/train'
     self.val_images_dir = '/media/data_cifs/pytorch_projects/datasets/Multicue_crops/data/images/test'
     self.processed_labels = 'processed_labels'
     self.processed_images = 'processed_images'
     self.config = Config()
     self.train_size = int(760 * 0.01)
     self.im_size = [500, 500, 3]  # [321, 481, 3]
     self.model_input_image_size = [320, 320, 3]  # [224, 224, 3]
     self.val_model_input_image_size = [320, 320, 3]
     self.output_size = [500, 500, 1]  # [321, 481, 1]
     self.label_size = self.output_size
     self.default_loss_function = 'pearson'
     self.score_metric = 'sigmoid_accuracy'
     self.aux_scores = ['f1']
     self.store_z = False
     self.input_normalization = 'none'  # 'zscore'
     self.preprocess = [None]  # Preprocessing before tfrecords
     self.folds = {'train': 'train', 'val': 'val'}
     self.fold_options = {'train': 'mean', 'val': 'mean'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.bytes_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='string')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': self.im_size
         },
         'label': {
             'dtype': tf.float32,
             'reshape': self.output_size
         }
     }
 def __init__(self):
     self.name = 'sheinberg_data_noise_subtracted'
     self.data_name = 'sheinberg_data'
     self.config = Config()
     self.output_size = [1, 1]
     self.im_size = [192, 256, 3]
     self.model_input_image_size = [192, 256, 3]
     self.num_rf_images = 2000
     self.default_loss_function = 'l2'
     self.score_metric = 'l2'
     self.preprocess = [None]
     self.im_ext = '.jpg'
     self.im_folder = 'scene_images'
     self.neural_data = 'spike'  # 'spike'
     self.val_set = -76
     self.save_npys = True
     self.num_channels = 33  # 32 with indexing from 1
     self.dates = ['100614', '100714', '100814', '100914']
     # Recording starts 200 ms before stimulus onset; the target window is
     # 50-150 ms post-onset, i.e. samples 250-350 of each trace.
     self.spike_range = [250, 350]
     self.resize = [192, 256]
     self.folds = {'train': 'train', 'test': 'test'}
     self.targets = {
         'image': tf_fun.bytes_feature,
         'label': tf_fun.float_feature
     }
     self.tf_dict = {
         'image': tf_fun.fixed_len_feature(dtype='string'),
         'label': tf_fun.fixed_len_feature(dtype='float')
     }
     self.tf_reader = {
         'image': {
             'dtype': tf.float32,
             'reshape': None
         },
         'label': {
             'dtype': tf.float32,
             'reshape': None
         }
     }
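# --- Hedged sketch (not part of the original repo) -------------------------
# Illustration of how spike_range above could summarize a neural response:
# average activity in samples 250-350, i.e. 50-150 ms after stimulus onset
# given the 200 ms pre-onset recording. The (channels x timepoints) layout of
# the input array is an assumption.
import numpy as np


def window_response(spikes, spike_range=(250, 350)):
    """Mean response per channel inside the analysis window."""
    start, stop = spike_range
    return np.asarray(spikes, dtype=np.float32)[:, start:stop].mean(axis=1)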