def __init__(self, filters, kernel_size,
             kernel_initializer='glorot_uniform', activation=None,
             weights=None, padding='valid', strides=(1, 1),
             data_format=None, kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, **kwargs):
    """Record the configuration of a CosineConvolution2D layer.

    Validates ``padding``, resolves string identifiers into Keras
    initializer/activation/regularizer/constraint objects, and stores
    everything; weights are created later during ``build``.
    """
    if data_format is None:
        data_format = K.image_data_format()
    if padding not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for CosineConvolution2D:',
                         padding)

    # Convolution geometry.
    self.filters = filters
    self.kernel_size = kernel_size
    self.nb_row, self.nb_col = kernel_size
    self.padding = padding
    self.strides = tuple(strides)
    self.data_format = normalize_data_format(data_format)

    # Resolve identifiers into Keras objects.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(CosineConvolution2D, self).__init__(**kwargs)
def __init__(self, rank, filters, kernel_size, strides=1, padding='valid',
             data_format='channels_first', dilation_rate=1, activation=None,
             use_bias=True, normalize_weight=False,
             kernel_initializer='quaternion', bias_initializer='zeros',
             gamma_diag_initializer=sqrt_init, gamma_off_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             gamma_diag_regularizer=None, gamma_off_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, gamma_diag_constraint=None,
             gamma_off_constraint=None, init_criterion='he', seed=None,
             spectral_parametrization=False, epsilon=1e-7, **kwargs):
    """Store the configuration of a quaternion convolution layer."""
    super(QuaternionConv, self).__init__(**kwargs)

    # Convolution geometry, normalized to rank-length tuples.
    self.rank = rank
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = common.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    'dilation_rate')

    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.normalize_weight = normalize_weight
    self.init_criterion = init_criterion
    self.spectral_parametrization = spectral_parametrization
    self.epsilon = epsilon

    # Initializers go through the sanitizer so the custom quaternion
    # initializer names are accepted alongside standard Keras ones.
    self.kernel_initializer = sanitizedInitGet(kernel_initializer)
    self.bias_initializer = sanitizedInitGet(bias_initializer)
    self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
    self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
    self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
    self.gamma_off_constraint = constraints.get(gamma_off_constraint)

    # Random seed for weight initialization; draw one if not supplied.
    self.seed = seed if seed is not None else np.random.randint(1, 10e6)
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, filters, kernel_size, strides=1, rank=2, padding='valid',
             data_format=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """Store the configuration of a Conv2D121 layer.

    Fix: ``self.use_bias`` was assigned with a trailing comma
    (``self.use_bias = use_bias,``), which silently stored the one-element
    tuple ``(use_bias,)``. A tuple is always truthy, so ``use_bias=False``
    was effectively ignored. The trailing comma is removed.
    """
    super(Conv2D121, self).__init__(**kwargs)
    self.rank = rank
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    # normalize_padding: only 'valid', 'same' and 'causal' are legal.
    self.padding = conv_utils.normalize_padding(padding)
    # Validate/default the image data format.
    self.data_format = normalize_data_format(data_format)
    self.use_bias = use_bias  # was `use_bias,` (a tuple) — bug fixed
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
    """Normalize `padding` into ((top, bottom), (left, right)) form."""
    super(ReflectionPadding2D, self).__init__(**kwargs)
    self.data_format = K.normalize_data_format(data_format)
    if isinstance(padding, int):
        # A single int pads all four sides symmetrically.
        self.padding = ((padding, padding), (padding, padding))
    elif hasattr(padding, '__len__'):
        if len(padding) != 2:
            raise ValueError('`padding` should have two elements. '
                             'Found: ' + str(padding))
        self.padding = (
            conv_utils.normalize_tuple(padding[0], 2, '1st entry of padding'),
            conv_utils.normalize_tuple(padding[1], 2, '2nd entry of padding'),
        )
    else:
        raise ValueError('`padding` should be either an int, '
                         'a tuple of 2 ints '
                         '(symmetric_height_pad, symmetric_width_pad), '
                         'or a tuple of 2 tuples of 2 ints '
                         '((top_pad, bottom_pad), (left_pad, right_pad)). '
                         'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=4)
def get_2data(data_name, resize=True, data_format=None):
    """Load grayscale images when train and test data live in separate
    top-level folders (folder names containing 'train' / 'test').

    Results are cached as a pickle named from `data_name` and the global
    Width/Height; on a cache hit the pickle is returned directly.
    Returns ((X_train, y_train), (X_test, y_test)) as numpy arrays.
    """
    # Used when the train and test data are split into two parts.
    file_name = os.path.join(
        pic_dir_out, data_name + str(Width) + "X" + str(Height) + ".pkl")
    if os.path.exists(file_name):  # reuse a previously saved cache file
        (X_train, y_train), (X_test, y_test) = pickle.load(open(file_name, "rb"))
        return (X_train, y_train), (X_test, y_test)
    data_format = normalize_data_format(data_format)
    all_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    for all_dir in all_dir_set:
        if not os.path.isdir(os.path.join(pic_dir_data, all_dir)):
            continue
        # Labels restart at 0 per top-level dir so the 'train' and 'test'
        # folders assign matching labels to identically-ordered subfolders.
        label = 0
        pic_dir_set = eachFile(os.path.join(pic_dir_data, all_dir))
        for pic_dir in pic_dir_set:
            print(pic_dir_data + pic_dir)
            if not os.path.isdir(os.path.join(pic_dir_data, all_dir, pic_dir)):
                continue
            pic_set = eachFile(os.path.join(pic_dir_data, all_dir, pic_dir))
            for pic_name in pic_set:
                if not os.path.isfile(
                        os.path.join(pic_dir_data, all_dir, pic_dir, pic_name)):
                    continue
                img = cv2.imread(
                    os.path.join(pic_dir_data, all_dir, pic_dir, pic_name))
                if img is None:  # skip unreadable files
                    continue
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # to 1 channel
                if resize:
                    img = cv2.resize(img, (Width, Height))
                # Reshape to a 4D batch entry in the requested data format.
                if (data_format == 'channels_last'):
                    img = img.reshape(-1, Width, Height, 1)
                elif (data_format == 'channels_first'):
                    img = img.reshape(-1, 1, Width, Height)
                # Route the sample by the name of its top-level folder.
                if ('train' in all_dir):
                    X_train.append(img)
                    y_train.append(label)
                elif ('test' in all_dir):
                    X_test.append(img)
                    y_test.append(label)
            if len(pic_set) != 0:  # only non-empty folders consume a label
                label += 1
    X_train = np.concatenate(X_train, axis=0)
    X_test = np.concatenate(X_test, axis=0)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    # Cache for the next call.
    pickle.dump([(X_train, y_train), (X_test, y_test)], open(file_name, "wb"))
    return (X_train, y_train), (X_test, y_test)
def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):
    """Bilinear upsampling layer.

    :param upsampling: (height, width) upsampling factors.
    :param data_format: image data format; None selects the default.
    :param kwargs: forwarded to the base Layer.
    """
    super(BilinearUpsampling, self).__init__(**kwargs)
    self.upsampling = conv_utils.normalize_tuple(upsampling, 2, "size")
    self.data_format = normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def get_data(data_name, train_percentage=0.7, resize=True, data_format=None):
    """Load grayscale images from `pic_dir_data`, one class per subfolder,
    splitting each folder's first `train_percentage` fraction into the
    training set and the rest into the test set.

    Results are cached as a pickle named from `data_name` and the global
    Width/Height. Returns ((X_train, y_train), (X_test, y_test)).
    """
    # Fetch image data from the folders.
    file_name = os.path.join(
        pic_dir_out, data_name + str(Width) + "X" + str(Height) + ".pkl")
    if os.path.exists(file_name):  # reuse a previously saved cache file
        (X_train, y_train), (X_test, y_test) = pickle.load(open(file_name, "rb"))
        return (X_train, y_train), (X_test, y_test)
    data_format = normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0  # class label: one per non-empty subfolder
    for pic_dir in pic_dir_set:
        print(pic_dir_data + pic_dir)
        if not os.path.isdir(os.path.join(pic_dir_data, pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data, pic_dir))
        pic_index = 0
        train_count = int(len(pic_set) * train_percentage)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data, pic_dir, pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data, pic_dir, pic_name))
            if img is None:  # skip unreadable files
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # BGR -> 1 channel
            if (resize):
                img = cv2.resize(img, (Width, Height))  # resize to target size
            if (data_format == 'channels_last'):
                img = img.reshape(-1, Width, Height, 1)  # reshape for backend
            elif (data_format == 'channels_first'):
                img = img.reshape(-1, 1, Width, Height)
            # First `train_count` readable images go to train, rest to test.
            if (pic_index < train_count):
                X_train.append(img)
                y_train.append(label)
            else:
                X_test.append(img)
                y_test.append(label)
            pic_index += 1
        if len(pic_set) != 0:  # only non-empty folders consume a label
            label += 1
    X_train = np.concatenate(X_train, axis=0)
    X_test = np.concatenate(X_test, axis=0)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    # Cache for the next call.
    pickle.dump([(X_train, y_train), (X_test, y_test)], open(file_name, "wb"))
    return (X_train, y_train), (X_test, y_test)
def __init__(self, target_shape, offset=None, data_format=None, **kwargs):
    """Crop the input to `target_shape` starting at `offset`.

    `offset` may be None or 'centered' (center crop), an int (same offset
    on both axes), or a pair of ints.

    Fix: an unsupported `offset` (e.g. a float) previously fell through
    every branch, leaving `self.offset` unset and failing later with an
    opaque AttributeError; it now raises ValueError immediately.
    """
    super(CroppingLike2D, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    self.target_shape = target_shape
    if offset is None or offset == 'centered':
        self.offset = 'centered'
    elif isinstance(offset, int):
        self.offset = (offset, offset)
    elif hasattr(offset, '__len__'):
        if len(offset) != 2:
            raise ValueError('`offset` should have two elements. '
                             'Found: ' + str(offset))
        self.offset = offset
    else:
        raise ValueError("`offset` should be either None, 'centered', "
                         'an int, or an iterable of two ints. '
                         'Found: ' + str(offset))
    self.input_spec = InputSpec(ndim=4)
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None,
             **kwargs):
    """Bilinear upsampling by either a fixed output size or a factor.

    A truthy `output_size` takes precedence and disables `upsampling`.
    """
    super(BilinearUpsampling, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
    if output_size:
        self.output_size = conv_utils.normalize_tuple(output_size, 2,
                                                      'output_size')
        self.upsampling = None
    else:
        self.output_size = None
        self.upsampling = conv_utils.normalize_tuple(upsampling, 2,
                                                     'upsampling')
def __init__(self, filters=1, kernel_size=80, rank=1, strides=1,
             padding='valid', data_format='channels_last', dilation_rate=1,
             activation=None, use_bias=True, fsHz=1000.,
             fc_initializer=initializers.RandomUniform(minval=10, maxval=4000),
             n_order_initializer=initializers.constant(4.),
             amp_initializer=initializers.constant(10**5),
             beta_initializer=initializers.RandomNormal(mean=30, stddev=6),
             bias_initializer='zeros', **kwargs):
    """Store configuration and precompute the gammatone filter parameters
    (fc, order, amplitude, beta) plus the sampling-time axis."""
    super(Conv1D_gammatone_coeff, self).__init__(**kwargs)

    # Convolution geometry.
    self.rank = rank
    self.filters = filters
    self.kernel_size_ = kernel_size
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias

    # Resolve the initializers for the filter parameters.
    self.bias_initializer = initializers.get(bias_initializer)
    self.fc_initializer = initializers.get(fc_initializer)
    self.n_order_initializer = initializers.get(n_order_initializer)
    self.amp_initializer = initializers.get(amp_initializer)
    self.beta_initializer = initializers.get(beta_initializer)
    self.input_spec = InputSpec(ndim=self.rank + 2)

    # Draw one parameter set per filter (order is shared: shape (1, 1)).
    self.fc = self.fc_initializer((self.filters, 1))
    self.n_order = self.n_order_initializer((1, 1))
    self.amp = self.amp_initializer((self.filters, 1))
    self.beta = self.beta_initializer((self.filters, 1))

    # Time axis covering one kernel at the given sampling rate, shape (T, 1).
    self.fsHz = fsHz
    sample_period = 1 / float(fsHz)
    self.t = tf.range(start=0, limit=kernel_size / float(fsHz),
                      delta=sample_period, dtype=K.floatx())
    self.t = tf.expand_dims(input=self.t, axis=-1)
def __init__(self, size=(2, 2), num_pixels=(0, 0),
             data_format='channels_last', method_name='FgSegNet_M', **kwargs):
    """Custom upsampling layer for the FgSegNet model family.

    Fix: `method_name` was validated with `assert`, which is stripped when
    Python runs with -O, silently accepting bad values; it now raises
    ValueError unconditionally, and before any state is set.
    """
    if method_name not in ('FgSegNet_M', 'FgSegNet_S', 'FgSegNet_v2'):
        raise ValueError('Provided method_name is incorrect.')
    super(MyUpSampling2D, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.input_spec = InputSpec(ndim=4)
    self.num_pixels = num_pixels
    self.method_name = method_name
    print('MyUpsamling is being used')
def __init__(self, filters, kernel_size, rank=1, strides=1, padding='valid',
             data_format='channels_last', dilation_rate=1, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, type=1, **kwargs):
    """Linear-phase 1D convolution: only half of each symmetric kernel is
    stored, so the effective kernel size is halved here.

    Fix: the FIR `type` was only rejected when greater than 4, even though
    the message says "between 1-4"; values below 1 now also raise.
    (`type` shadows the builtin but is kept for interface compatibility.)
    """
    super(Conv1D_linearphaseType, self).__init__(**kwargs)
    self.rank = rank
    self.filters = filters
    self.kernel_size_ = kernel_size
    # Keep one half of the symmetric kernel; round up for odd lengths so
    # the center tap is included.
    if kernel_size % 2:
        half = kernel_size // 2 + 1
    else:
        half = kernel_size // 2
    self.kernel_size = conv_utils.normalize_tuple(half, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=self.rank + 2)
    if not 1 <= type <= 4:  # was `type > 4`, letting 0/negative through
        raise ValueError('FIR type should be between 1-4')
    self.type = type
def __init__(self, type=2, n=None, axis=-2, norm=None, rank=1,
             data_format='channels_last', **kwargs):
    """1D discrete cosine transform layer (scipy-style parameters)."""
    super(DCT1D, self).__init__(**kwargs)
    if norm is not None and norm != 'ortho':
        raise ValueError('Normalization should be `ortho` or `None`')
    self.rank = rank
    self.type = type
    self.n = n
    self.axis = axis
    self.norm = norm
    self.data_format = normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, axis=-1, gamma_init='one', beta_init='zero',
             gamma_regularizer=None, beta_regularizer=None, epsilon=1e-6,
             group=32, data_format=None, **kwargs):
    """Group normalization configuration (`group` channel groups)."""
    super(GroupNormalization, self).__init__(**kwargs)
    self.epsilon = epsilon
    self.group = group
    self.axis = to_list(axis)
    self.data_format = normalize_data_format(data_format)
    # Resolve initializer/regularizer identifiers into objects.
    self.gamma_init = initializers.get(gamma_init)
    self.beta_init = initializers.get(beta_init)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.supports_masking = True
def __init__(self, filters, kernel_size, rank=1, strides=1, padding='valid',
             data_format='channels_last', dilation_rate=1, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """Store the configuration of the Conv1D_zerophase layer."""
    super(Conv1D_zerophase, self).__init__(**kwargs)

    # Convolution geometry.
    self.rank = rank
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    'dilation_rate')

    self.activation = activations.get(activation)
    self.use_bias = use_bias

    # Weight configuration.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, scale_factor=2, data_format=None, **kwargs):
    """Sub-pixel (pixel-shuffle) upscaling by `scale_factor`."""
    super(SubPixelUpscaling, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    self.scale_factor = scale_factor
def __init__(self, size=(2, 2), data_format=None, **kwargs):
    """Pixel-shuffle layer; `size` gives the (height, width) block factors."""
    super(PixelShuffler, self).__init__(**kwargs)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.data_format = normalize_data_format(data_format)
def __init__(self, rank, filters, kernel_size, strides=1, padding='valid',
             data_format=None, dilation_rate=1,
             activation=None,  # key of activation
             use_bias=True, normalize_weight=False,
             kernel_initializer='he_complex', bias_initializer='zeros',
             gamma_diag_initializer=sqrt_init, gamma_off_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             gamma_diag_regularizer=None, gamma_off_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, gamma_diag_constraint=None,
             gamma_off_constraint=None, init_criterion='he', seed=None,
             spectral_parametrization=False, transposed=False, epsilon=1e-7,
             **kwargs):
    """Base configuration shared by the complex-valued conv layers."""
    super(_ComplexConv, self).__init__(**kwargs)

    # Convolution geometry.
    self.rank = rank
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    'dilation_rate')

    # NOTE: the activation key is stored as-is here, not resolved through
    # activations.get.
    self.activation = activation
    self.use_bias = use_bias
    self.normalize_weight = normalize_weight
    self.init_criterion = init_criterion
    self.spectral_parametrization = spectral_parametrization
    self.transposed = transposed
    self.epsilon = epsilon

    # Initializers go through the sanitizer so the complex-specific
    # initializer names are accepted.
    self.kernel_initializer = sanitizedInitGet(kernel_initializer)
    self.bias_initializer = sanitizedInitGet(bias_initializer)
    self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
    self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
    self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
    self.gamma_off_constraint = constraints.get(gamma_off_constraint)

    # Random seed for weight initialization; draw one if not supplied.
    if seed is None:
        self.seed = np.random.randint(1, 1e6)
    else:
        self.seed = seed
    self.input_spec = InputSpec(ndim=self.rank + 2)

    # The following weights/statistics are created later (in build()).
    self.kernel_shape = None
    self.kernel = None
    self.gamma_rr = None
    self.gamma_ii = None
    self.gamma_ri = None
    self.bias = None
def __init__(self, data_format=None):
    """Record the image data format.

    `normalize_data_format` validates the value; None presumably falls
    back to the backend default ('channels_first'/'channels_last') —
    confirm against its definition.
    """
    self.data_format = normalize_data_format(data_format)
def __init__(self, rank, filters, kernel_size, strides=1, padding="valid",
             data_format=None, dilation_rate=1, activation=None,
             use_bias=True, normalize_weight=False,
             kernel_initializer="complex", bias_initializer="zeros",
             gamma_diag_initializer=sqrt_init, gamma_off_initializer="zeros",
             kernel_regularizer=None, bias_regularizer=None,
             gamma_diag_regularizer=None, gamma_off_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, gamma_diag_constraint=None,
             gamma_off_constraint=None, init_criterion="he", seed=None,
             spectral_parametrization=False, transposed=False, epsilon=1e-7,
             **kwargs):
    """Store the configuration of a complex-valued convolution layer."""
    super(ComplexConv, self).__init__(**kwargs)

    # Convolution geometry.
    self.rank = rank
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  "kernel_size")
    self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
    self.padding = conv_utils.normalize_padding(padding)
    # rank-1 convolutions always run channels_last; otherwise normalize
    # the caller-supplied format.
    if rank == 1:
        self.data_format = "channels_last"
    else:
        self.data_format = normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                    "dilation_rate")

    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.normalize_weight = normalize_weight
    self.init_criterion = init_criterion
    self.spectral_parametrization = spectral_parametrization
    self.transposed = transposed
    self.epsilon = epsilon

    # Initializers go through the sanitizer so the complex-specific
    # initializer names are accepted.
    self.kernel_initializer = sanitizedInitGet(kernel_initializer)
    self.bias_initializer = sanitizedInitGet(bias_initializer)
    self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
    self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
    self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
    self.gamma_off_constraint = constraints.get(gamma_off_constraint)

    # Random seed for weight initialization; draw one if not supplied.
    self.seed = np.random.randint(1, 10e6) if seed is None else seed
    self.input_spec = InputSpec(ndim=self.rank + 2)

    # The following weights/statistics are created later (in build()).
    self.kernel_shape = None
    self.kernel = None
    self.gamma_rr = None
    self.gamma_ii = None
    self.gamma_ri = None
    self.bias = None
def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):
    """Bilinear upsampling by fixed (height, width) factors."""
    super(BilinearUpsampling, self).__init__(**kwargs)
    self.data_format = common.normalize_data_format(data_format)
    self.upsampling = conv_utils.normalize_tuple(upsampling, 2, 'size')
    self.input_spec = InputSpec(ndim=4)
def __init__(self, size=(2, 2), data_format=None, **kwargs):
    """Bilinear upsampling layer with configurable `size` factors."""
    super(BilinearUpSampling2D, self).__init__(**kwargs)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.data_format = normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def get_data(train_left=0.0, train_right=0.7, train_all=0.7, resize=True,
             data_format=None, t=''):
    """Load color images from `pic_dir`, one class per subfolder.

    The first `train_all` fraction of each folder forms the training pool;
    within it, images whose index falls in [train_left, train_right)
    (as fractions of the folder) are collected when t == 'train'. Images
    past the training pool are collected when t == 'test'.

    Fix: the function always returned (X_train, y_train) — even for
    t == 'test', where it had just built (and then discarded) the test
    split; it now returns (X_test, y_test) in that case.
    """
    data_format = normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0  # class label: one per non-empty subfolder
    for pic_dir_name in pic_dir_set:
        # Skip anything that is not a class folder.
        if not os.path.isdir(os.path.join(pic_dir, pic_dir_name)):
            continue
        pic_set = eachFile(os.path.join(pic_dir, pic_dir_name))  # image list
        pic_index = 0
        train_count = int(len(pic_set) * train_all)  # size of training pool
        train_l = int(len(pic_set) * train_left)
        train_r = int(len(pic_set) * train_right)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir, pic_dir_name,
                                               pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir, pic_dir_name, pic_name))
            if img is None:  # skip unreadable files
                continue
            if resize:
                img = cv2.resize(img, (Width, Height))
            img = img.reshape(-1, Width, Height, 3)
            if pic_index < train_count:
                # Training pool: take only the [train_l, train_r) window.
                if t == 'train' and train_l <= pic_index < train_r:
                    X_train.append(img)
                    y_train.append(label)
            else:
                if t == 'test':
                    X_test.append(img)
                    y_test.append(label)
            pic_index += 1
        if len(pic_set) != 0:  # only non-empty folders consume a label
            label += 1
    if t == 'test':
        return (X_test, y_test)  # was unreachable: always returned train data
    return (X_train, y_train)