from tensorflow.keras.layers import Input, Embedding, LSTM, TimeDistributed, Dense
from tensorflow.keras.models import Model


def create_model(input_dict_size, output_dict_size,
                 input_length=DEFAULT_INPUT_LENGTH,
                 output_length=DEFAULT_OUTPUT_LENGTH):
    """Builds a simple encoder-decoder (seq2seq) model."""
    encoder_input = Input(shape=(input_length,))
    decoder_input = Input(shape=(output_length,))

    # Encoder: embed the input tokens and compress the sequence into a
    # single 64-dimensional vector (the final LSTM hidden state).
    encoder = Embedding(input_dict_size, 64,
                        input_length=input_length, mask_zero=True)(encoder_input)
    encoder = LSTM(64, return_sequences=False)(encoder)

    # Decoder: seeded with the encoder vector as both initial hidden and
    # cell state, it emits one softmax distribution per output step.
    decoder = Embedding(output_dict_size, 64,
                        input_length=output_length, mask_zero=True)(decoder_input)
    decoder = LSTM(64, return_sequences=True)(decoder,
                                              initial_state=[encoder, encoder])
    decoder = TimeDistributed(Dense(output_dict_size, activation='softmax'))(decoder)

    model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder])
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    return model
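# A minimal usage sketch for create_model (an assumption, not part of the
# original source): the vocabulary sizes, sequence lengths, and sample data
# below are made up. In a real teacher-forcing setup the decoder input would
# be the target sequence shifted right by one step.
import numpy as np

model = create_model(input_dict_size=100, output_dict_size=100,
                     input_length=16, output_length=16)
encoder_in = np.random.randint(1, 100, size=(32, 16))   # source token ids
decoder_in = np.random.randint(1, 100, size=(32, 16))   # shifted target ids
targets = np.eye(100)[np.random.randint(1, 100, size=(32, 16))]  # one-hot targets
model.fit([encoder_in, decoder_in], targets, epochs=1, batch_size=8)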
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Construct the model body and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    # is_tiny_version = num_anchors == 6  # default setting
    self.yolo_model = custom_yolo3_spp_body(Input(shape=(None, None, 3)),
                                            num_anchors // 3, num_classes)
    # Make sure model, anchors and classes match.
    self.yolo_model.load_weights(self.model_path)
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2,))
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names),
                                       self.input_image_shape,
                                       score_threshold=self.score,
                                       iou_threshold=self.iou)
    return boxes, scores, classes
def generate_inceptionResnetv2_based_model():
    """Siamese-style model: a shared InceptionResNetV2 backbone scores a pair of images."""
    irv2 = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=False)
    irv2.trainable = False  # use the backbone as a frozen feature extractor

    # Two image inputs share the same backbone weights.
    input1 = Input(shape=(299, 299, 3), name='input1')
    input2 = Input(shape=(299, 299, 3), name='input2')
    out1 = irv2(input1)
    out2 = irv2(input2)

    # For 299x299 inputs the backbone emits 8x8 feature maps; average-pool
    # them down to 1x1 before combining the two branches.
    aver_pool = AveragePooling2D(pool_size=(8, 8))
    out1 = aver_pool(out1)
    out2 = aver_pool(out2)

    # Concatenate both branches and map to a single tanh-bounded score.
    y = concatenate([out1, out2])
    y = Dense(1)(y)
    y = Activation('tanh')(y)
    y = Flatten()(y)

    model = Model(inputs=[input1, input2], outputs=y)
    return model
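# A hedged usage sketch (not from the original source): feed a pair of
# preprocessed 299x299 images through the pair model. Random arrays stand in
# for real images here; preprocess_input is the standard InceptionResNetV2 one.
import numpy as np
from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input

pair_model = generate_inceptionResnetv2_based_model()
img_a = preprocess_input(np.random.uniform(0, 255, size=(1, 299, 299, 3)))
img_b = preprocess_input(np.random.uniform(0, 255, size=(1, 299, 299, 3)))
score = pair_model.predict([img_a, img_b])  # shape (1, 1), in (-1, 1) from tanh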
def build_model():
    """Builds and returns the network."""
    # Create the inputs to the network.
    sky_images = Input(shape=(480, 480, 3), name='SkyImages')  # sky images
    tsi = Input(shape=(480, 480), dtype='int64',
                name='TSIDecisionImages')  # TSI's decision images (currently unused)

    # Main body of the network: one conv layer followed by wide horizontal and
    # vertical max-pooling strips, whose outputs are concatenated back together
    # with the conv features and the raw input image.
    conv1 = Convolution2D(filters=32, kernel_size=3, padding='same',
                          data_format='channels_last', activation='relu')(sky_images)
    maxpool1 = MaxPool2D(pool_size=(1, 100), strides=(1, 1), padding='same',
                         data_format='channels_last')(conv1)
    maxpool2 = MaxPool2D(pool_size=(100, 1), strides=(1, 1), padding='same',
                         data_format='channels_last')(conv1)
    concat1 = concatenate([conv1, maxpool1], axis=3)
    concat2 = concatenate([maxpool2, concat1], axis=3)
    concat3 = concatenate([concat2, sky_images], axis=3)
    conv2 = Convolution2D(filters=4, kernel_size=3, padding='same',
                          data_format='channels_last', activation='relu')(concat3)
    decision = DecidePixelColors()(conv2)

    model = Model(inputs=[sky_images], outputs=[conv2, decision])
    return model
def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Load model, or construct model and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    is_tiny_version = num_anchors == 6  # default setting
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except Exception:
        self.yolo_model = custom_yolo3_spp_body(Input(shape=(None, None, 3)),
                                                num_anchors // 3, num_classes)
        # self.yolo_model = yolo3_efficientnet_body(Input(shape=(None, None, 3)),
        #                                           num_anchors // 3, num_classes,
        #                                           level=5)
        # Make sure model, anchors and classes match.
        self.yolo_model.load_weights(self.model_path)
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == \
            num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes'
    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2,))
    if self.gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names),
                                       self.input_image_shape,
                                       score_threshold=self.score,
                                       iou_threshold=self.iou)
    return boxes, scores, classes
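# A hedged sketch of how these tensors are typically consumed, assumed from the
# common keras-yolo3 detection flow rather than shown in the original source.
# `image_data` (a (1, h, w, 3) array scaled to [0, 1]) and `image` (a PIL image)
# are hypothetical names; the session run evaluates the filtered detections for
# one input image.
self.boxes, self.scores, self.classes = self.generate()
self.sess = K.get_session()
out_boxes, out_scores, out_classes = self.sess.run(
    [self.boxes, self.scores, self.classes],
    feed_dict={
        self.yolo_model.input: image_data,
        self.input_image_shape: [image.size[1], image.size[0]],
        K.learning_phase(): 0,
    })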
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.models import Model

# This returns a tensor.
inputs = Input(shape=(32, 32, 3))

# A layer instance is callable on a tensor, and returns a tensor.
x = Flatten()(inputs)  # flatten the image so the Dense classifier sees a vector
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

# This creates a model that includes
# the Input layer and three Dense layers.
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
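# A minimal smoke test (an assumption, not in the original): fit the model on
# random data just to confirm the shapes and compile settings line up.
import numpy as np

x_dummy = np.random.random((16, 32, 32, 3))
y_dummy = tf.keras.utils.to_categorical(np.random.randint(10, size=(16,)),
                                        num_classes=10)
model.fit(x_dummy, y_dummy, epochs=1, batch_size=4)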
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(config_path)
    assert weights_path.endswith('.weights'), '{} is not a .weights file'.format(weights_path)
    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith('.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    major, minor, revision = np.ndarray(
        shape=(3,), dtype='int32', buffer=weights_file.read(12))
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3), name='image_input')
    prev_layer = input_layer
    all_layers = []

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.prod(weights_shape)

            print('conv2d', 'bn' if batch_normalize else '  ',
                  activation, weights_shape)

            conv_bias = np.ndarray(
                shape=(filters,), dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters), dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,      # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]   # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape, dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation == 'mish':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer.
            if stride > 1:
                # Darknet uses left and top padding instead of 'same' mode.
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            # elif activation == 'mish':
            #     act_layer = Activation(mish)(prev_layer)
            #     prev_layer = act_layer
            #     all_layers.append(act_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    pool_size=(size, size),
                    strides=(stride, stride),
                    padding='same')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('avgpool'):
            all_layers.append(AveragePooling2D()(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('reorg'):
            block_size = int(cfg_parser[section]['stride'])
            assert block_size == 2, 'Only reorg with stride 2 supported.'
            all_layers.append(
                Lambda(
                    lambda x: tf.nn.space_to_depth(x, block_size=2),
                    name='space_to_depth_x2')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('region'):
            with open('{}_anchors.txt'.format(output_root), 'w') as f:
                print(cfg_parser[section]['anchors'], file=f)

        elif section.startswith('yolo'):
            out_index.append(len(all_layers) - 1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif (section.startswith('net') or section.startswith('cost') or
              section.startswith('softmax')):
            pass

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    if len(out_index) == 0:
        out_index.append(len(all_layers) - 1)
    if args.yolo4_reorder:
        # Reverse the output tensor order for YOLOv4 cfg & weights,
        # since it uses a different yolo output order.
        out_index.reverse()
    model = Model(inputs=input_layer,
                  outputs=[all_layers[i] for i in out_index])
    model.summary()
    if args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))

    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(
        count, count + remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
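# A hedged sketch of the argparse wiring this converter expects, inferred from
# the argument names used above (config_path, weights_path, output_path,
# weights_only, plot_model, yolo4_reorder); the original source does not show it.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert Darknet weights to a Keras .h5 model.')
    parser.add_argument('config_path', help='Path to the Darknet .cfg file')
    parser.add_argument('weights_path', help='Path to the Darknet .weights file')
    parser.add_argument('output_path', help='Path for the output .h5 file')
    parser.add_argument('--weights_only', action='store_true',
                        help='Save only the model weights')
    parser.add_argument('--plot_model', action='store_true',
                        help='Save a plot of the generated model')
    parser.add_argument('--yolo4_reorder', action='store_true',
                        help='Reorder output tensors for YOLOv4 cfg/weights')
    _main(parser.parse_args())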
# NOTE: this snippet starts mid-loop in the original; the list initialization
# and loop header below are assumptions added so the block parses.
img_array = []  # assumed initialization (elided in the original)
for image_path in image_paths:  # hypothetical iterable of image file paths
    the_image = image.load_img(image_path, target_size=(224, 224, 3))
    x = image.img_to_array(the_image)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    img_array.append(x)

img_data = np.array(img_array)
print(img_data.shape)
img_data = np.rollaxis(img_data, 1, 0)
print(img_data.shape)
img_data = img_data[0]
print(img_data.shape)

num_classes = 6
train_X, test_X, train_Y, test_Y = train_test_split(img_data, label_array,
                                                    test_size=0.10)

image_input = Input(shape=(224, 224, 3))
model = VGG19(input_tensor=image_input, include_top=True, weights=None)
last_layer = model.get_layer('fc2').output  # VGG
# last_layer = model.get_layer('fc1000').output  # ResNet
# last_layer = model.get_layer('predictions').output
last_layer = Dropout(0.5)(last_layer)
out = Dense(num_classes, activation='softmax', name='output')(last_layer)
model = Model(image_input, out)
for layer in model.layers[:-1]:
    layer.trainable = False
model.summary()
model.compile(loss='sparse_categorical_crossentropy',
              # The original snippet is truncated mid-call; closing the
              # optimizer argument here is an assumption.
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999))
'''
Keras (TensorFlow) implementation of the model from
"DeXpression: Deep Convolutional Neural Network for Expression Recognition"
https://arxiv.org/pdf/1509.05371v2.pdf
By Minkesh Asati
'''
import tensorflow as tf
from tensorflow.keras.layers import (Input, Dense, Conv2D, MaxPool2D, Dropout,
                                     ReLU, BatchNormalization, concatenate,
                                     Flatten, GlobalAveragePooling2D)
from tensorflow.keras.models import Model
import numpy

# Pre-net: an initial convolution, pooling, and normalization stage.
inputs = Input(shape=(128, 128, 3))
pre_net = Conv2D(64, (7, 7), strides=(2, 2))(inputs)
pre_net = ReLU()(pre_net)
pre_net = MaxPool2D(pool_size=(3, 3), strides=(1, 1))(pre_net)
pre_net = BatchNormalization()(pre_net)


def feature_extractor(input_net):
    """One FeatEx block: two parallel conv paths, concatenated and pooled."""
    net_1 = Conv2D(96, 1, 1)(input_net)
    net_1 = ReLU()(net_1)
    net_1 = Conv2D(208, 3, 1)(net_1)
    net_1 = ReLU()(net_1)

    net_2 = MaxPool2D(3, 1)(input_net)
    net_2 = Conv2D(64, 1, 1)(net_2)
    net_2 = ReLU()(net_2)

    concat = concatenate(inputs=[net_1, net_2], axis=3)
    pooling_out = MaxPool2D(3, 2)(concat)
    # The original snippet is truncated; returning the pooled tensor here is
    # an assumption.
    return pooling_out
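# A hedged sketch of how the network would be assembled from here, following
# the DeXpression paper's structure (two stacked FeatEx blocks feeding a fully
# connected softmax classifier); the class count and dropout rate are
# assumptions, not from the original source.
feat_ex_1 = feature_extractor(pre_net)
feat_ex_2 = feature_extractor(feat_ex_1)
net = Flatten()(feat_ex_2)
net = Dropout(0.5)(net)  # assumed regularization
outputs = Dense(7, activation='softmax')(net)  # 7 emotion classes (assumed)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()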
def main(train_epochs):
    print('Hello Lenin Welcome to Transfer Learning with VGG16')

    # Reading images to form the X vector.
    labels_name = {'benign': 0, 'malignant': 1}
    img_data, img_labels = read_dataset('/data_roi_single/train',
                                        labels_dict=labels_name)
    print(np.unique(img_labels, return_counts=True))
    # categories_names = ['benign', 'malignant']
    num_classes = 2
    # labels = labelling_outputs(num_classes, img_data.shape[0])
    # labels = labelling_mammo(num_classes, img_data.shape[0])

    # Converting class labels to one-hot encoding.
    y_one_hot = to_categorical(img_labels, num_classes)

    # Shuffle data.
    x, y = shuffle(img_data, y_one_hot, random_state=2)

    # Dataset split.
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2,
                                                    random_state=2)

    #########################################################################
    # custom_vgg_model_1: training the classifier alone.
    image_input = Input(shape=(224, 224, 3))
    model = VGG16(input_tensor=image_input, include_top=True, weights='imagenet')
    model.summary()
    last_layer = model.get_layer('fc2').output
    out = Dense(num_classes, activation='sigmoid',
                name='vgg16TL')(last_layer)  # sigmoid instead of softmax
    custom_vgg_model = Model(image_input, out)
    custom_vgg_model.summary()

    # Up to this point the custom model is trainable at all layers.
    # Now freeze every layer except the last one.
    for layer in custom_vgg_model.layers[:-1]:
        layer.trainable = False
    custom_vgg_model.summary()
    # custom_vgg_model.layers[3].trainable
    # custom_vgg_model.layers[-1].trainable

    # Model compilation: binary cross-entropy instead of categorical.
    custom_vgg_model.compile(loss='binary_crossentropy', optimizer='rmsprop',
                             metrics=['accuracy'])

    print('Transfer Learning Training...')
    t = time.time()
    num_of_epochs = train_epochs  # user-defined number of epochs
    hist = custom_vgg_model.fit(xtrain, ytrain, batch_size=64,
                                epochs=num_of_epochs, verbose=1,
                                validation_data=(xtest, ytest))
    print('Training time: %s' % (time.time() - t))

    # Save the trained model.
    custom_vgg_model.save('vgg16_tf_bc.h5')

    print('Evaluation...')
    (loss, accuracy) = custom_vgg_model.evaluate(xtest, ytest, batch_size=10,
                                                 verbose=1)
    print('[INFO] loss={:.4f}, accuracy: {:.4f}%'.format(loss, accuracy * 100))
    print('Finished')

    # Training graphics: visualizing loss and accuracy.
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['acc']
    val_acc = hist.history['val_acc']
    xc = range(num_of_epochs)  # this value is tied to the number of epochs

    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    plt.style.use(['classic'])  # check what other styles are available
    plt.savefig('vgg16_train_val_loss.jpg')

    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of epochs')
    plt.ylabel('accuracy')
    plt.title('train_accuracy vs val_accuracy')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    plt.style.use(['classic'])  # check what other styles are available
    plt.savefig('vgg16_train_val_acc.jpg')
    plt.show()
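# A minimal entry-point sketch (an assumption; the original does not show how
# main is invoked). The epoch count is a placeholder:
if __name__ == '__main__':
    main(train_epochs=50)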