# Imports required by the converter below; the tensorflow.keras import roots
# are an assumption -- the original script may import from standalone Keras.
import os
import io
import argparse
import configparser
from collections import defaultdict

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Input, Conv2D, ZeroPadding2D, Add,
                                     UpSampling2D, MaxPooling2D,
                                     AveragePooling2D, Concatenate, Lambda,
                                     LeakyReLU, BatchNormalization)
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model as plot


def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    major, minor, revision = np.ndarray(
        shape=(3,), dtype='int32', buffer=weights_file.read(12))
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(shape=(1,), dtype='int64',
                          buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1,), dtype='int32',
                          buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3), name='image_input')
    prev_layer = input_layer
    all_layers = []

    weight_decay = float(cfg_parser['net_0']['decay']) \
        if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.prod(weights_shape)

            print('conv2d', 'bn' if batch_normalize else ' ', activation,
                  weights_shape)

            conv_bias = np.ndarray(
                shape=(filters,),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,      # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]   # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation == 'mish':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            if stride > 1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            # elif activation == 'mish':
            #     act_layer = Activation(mish)(prev_layer)
            #     prev_layer = act_layer
            #     all_layers.append(act_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    pool_size=(size, size),
                    strides=(stride, stride),
                    padding='same')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('avgpool'):
            all_layers.append(
                AveragePooling2D()(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('reorg'):
            block_size = int(cfg_parser[section]['stride'])
            assert block_size == 2, 'Only reorg with stride 2 supported.'
            all_layers.append(
                Lambda(
                    # space_to_depth_x2,
                    # output_shape=space_to_depth_x2_output_shape,
                    lambda x: tf.nn.space_to_depth(x, block_size=2),
                    name='space_to_depth_x2')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('region'):
            with open('{}_anchors.txt'.format(output_root), 'w') as f:
                print(cfg_parser[section]['anchors'], file=f)

        elif section.startswith('yolo'):
            out_index.append(len(all_layers) - 1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif (section.startswith('net') or section.startswith('cost') or
              section.startswith('softmax')):
            pass

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    if len(out_index) == 0:
        out_index.append(len(all_layers) - 1)

    if args.yolo4_reorder:
        # Reverse the output tensor index for YOLOv4 cfg & weights,
        # since it uses a different YOLO output order.
        out_index.reverse()

    model = Model(inputs=input_layer,
                  outputs=[all_layers[i] for i in out_index])
    model.summary()

    if args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))

    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(
        count, count + remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
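
# `_main` relies on a `unique_config_sections` helper that is not shown
# above. The sketch below is a minimal assumed implementation: configparser
# rejects duplicate section names, so each repeated Darknet section (e.g.
# [convolutional]) gets a running-index suffix ([convolutional_0],
# [convolutional_1], ...), which is also why the code looks up 'net_0'.
def unique_config_sections(config_file):
    """Append a unique running index to each section name so the
    Darknet .cfg can be parsed by configparser."""
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream


# A minimal command-line entry point, reconstructed from the attributes
# `_main` reads off `args` (config_path, weights_path, output_path,
# weights_only, plot_model, yolo4_reorder); the flag spellings and help
# strings are assumptions, not taken from the original script.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert Darknet .cfg/.weights to a Keras .h5 model.')
    parser.add_argument('config_path', help='path to the Darknet .cfg file')
    parser.add_argument('weights_path',
                        help='path to the Darknet .weights file')
    parser.add_argument('output_path', help='path for the output .h5 file')
    parser.add_argument('--weights_only', action='store_true',
                        help='save only the model weights, not the full model')
    parser.add_argument('--plot_model', action='store_true',
                        help='save a plot of the generated model graph')
    parser.add_argument('--yolo4_reorder', action='store_true',
                        help='reverse output order for YOLOv4 cfg & weights')
    _main(parser.parse_args())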
# This fragment continues a model.compile(...) call whose opening (including
# the loss argument) is not shown; `model`, train_X/train_Y and
# test_X/test_Y are defined in earlier, undisplayed code.
model.compile(loss='categorical_crossentropy',  # assumed loss, elided in the original
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                             epsilon=1e-08, decay=0.0),
              metrics=['accuracy'])

# Train interactively: keep fitting in user-chosen chunks of epochs.
times = 50
while True:
    hist = model.fit(train_X, train_Y, batch_size=24, epochs=times,
                     verbose=1, validation_data=(test_X, test_Y))
    a = input("continue?y/n")
    if a == "y":
        times = int(input("times:"))
    else:
        break

(loss, accuracy) = model.evaluate(test_X, test_Y, batch_size=8, verbose=1)
prediction = model.predict(test_X)
predict = np.argmax(prediction, axis=1)

print("Save Model?y/n")
a = input()
if a == "y":
    print("Saving Model")
    model.save("GORES888.h5", overwrite=True, include_optimizer=True)
else:
    print("Over")
# `image_input`, `last_layer`, `num_classes` and the train/test arrays come
# from earlier, undisplayed code that loads a pretrained base model and data.
out = Dense(num_classes, activation="softmax", name="output")(last_layer)
model = Model(image_input, out)

# Freeze every layer except the new classification head.
for layer in model.layers[:-1]:
    layer.trainable = False
model.summary()

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                             epsilon=1e-08, decay=0.0),
              metrics=['accuracy'])

hist = model.fit(train_X, train_Y, batch_size=16, epochs=30,
                 verbose=1, validation_data=(test_X, test_Y))
(loss, accuracy) = model.evaluate(test_X, test_Y, batch_size=8, verbose=1)
prediction = model.predict(test_X)
predict = np.argmax(prediction, axis=1)

print("Save Model?y/n")
a = input()
if a == "y":
    print("Saving Model")
    model.save("RESRetry14.h5", overwrite=True, include_optimizer=True)
else:
    print("Over")
# Imports needed by main() below; the Keras import roots are assumed (the
# history keys 'acc'/'val_acc' suggest pre-TF2 standalone Keras), and
# read_dataset is a project-local helper defined elsewhere.
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.layers import Input, Dense
from keras.models import Model
from keras.applications.vgg16 import VGG16


def main(train_epochs):
    print('Hello Lenin Welcome to Transfer Learning with VGG16')

    # Read images to form the X vector.
    labels_name = {'benign': 0, 'malignant': 1}
    img_data, img_labels = read_dataset('/data_roi_single/train',
                                        labels_dict=labels_name)
    print(np.unique(img_labels, return_counts=True))

    # categories_names = ['benign', 'malignant']
    num_classes = 2
    # labels = labelling_outputs(num_classes, img_data.shape[0])
    # labels = labelling_mammo(num_classes, img_data.shape[0])

    # Convert class labels to one-hot encoding.
    y_one_hot = to_categorical(img_labels, num_classes)

    # Shuffle data.
    x, y = shuffle(img_data, y_one_hot, random_state=2)

    # Dataset split.
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2,
                                                    random_state=2)

    #########################################################################
    # Custom_vgg_model_1
    # Training the classifier alone.
    image_input = Input(shape=(224, 224, 3))
    model = VGG16(input_tensor=image_input, include_top=True,
                  weights='imagenet')
    model.summary()

    last_layer = model.get_layer('fc2').output
    out = Dense(num_classes, activation='sigmoid',
                name='vgg16TL')(last_layer)  # sigmoid instead of softmax
    custom_vgg_model = Model(image_input, out)
    custom_vgg_model.summary()

    # Up to this point the custom model is trainable at all layers.
    # Now freeze all the layers except the last one.
    for layer in custom_vgg_model.layers[:-1]:
        layer.trainable = False
    custom_vgg_model.summary()
    # custom_vgg_model.layers[3].trainable
    # custom_vgg_model.layers[-1].trainable

    # Model compilation.
    custom_vgg_model.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])  # binary cross-entropy instead of categorical

    print('Transfer Learning Training...')
    t = time.time()
    num_of_epochs = train_epochs  # User defines the number of epochs.
    hist = custom_vgg_model.fit(xtrain, ytrain, batch_size=64,
                                epochs=num_of_epochs, verbose=1,
                                validation_data=(xtest, ytest))
    print('Training time: %s' % (time.time() - t))

    # Save the trained model.
    custom_vgg_model.save('vgg16_tf_bc.h5')

    print('Evaluation...')
    (loss, accuracy) = custom_vgg_model.evaluate(xtest, ytest,
                                                 batch_size=10, verbose=1)
    print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,
                                                         accuracy * 100))
    print("Finished")

    # Model training graphics: visualize losses and accuracy.
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['acc']  # pre-TF2 Keras history key
    val_acc = hist.history['val_acc']
    xc = range(num_of_epochs)  # This value is tied to the number of epochs.

    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    plt.style.use(['classic'])  # Check what other styles are available.
    plt.savefig('vgg16_train_val_loss.jpg')

    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of epochs')
    plt.ylabel('accuracy')
    plt.title('train_accuracy vs val_accuracy')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    plt.style.use(['classic'])  # Check what other styles are available.
    plt.savefig('vgg16_train_val_acc.jpg')
    plt.show()
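
# A minimal entry point for main() above; the argparse wiring and the
# default epoch count are assumptions, not part of the original script.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Transfer learning with VGG16 on mammography ROIs.')
    parser.add_argument('--epochs', type=int, default=30,
                        help='number of training epochs')
    cli_args = parser.parse_args()
    main(cli_args.epochs)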