def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    if args.tfsm:
        output_root = output_path
    else:
        assert output_path.endswith(
            '.h5'), 'output path {} is not a .h5 file'.format(output_path)
        output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    major, minor, revision = np.ndarray(
        shape=(3, ), dtype='int32', buffer=weights_file.read(12))
    # Weights files with header version >= 0.2 store `seen` as a 64-bit int.
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(
            shape=(1, ), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(
            shape=(1, ), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3))
    prev_layer = input_layer
    all_layers = []

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.prod(weights_shape)

            print('conv2d', 'bn' if batch_normalize else ' ', activation,
                  weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            if stride > 1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    pool_size=(size, size),
                    strides=(stride, stride),
                    padding='same')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
            out_index.append(len(all_layers) - 1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif section.startswith('net'):
            pass

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    # Fall back to the last layer as the output if no [yolo] sections were
    # found; this must happen before the Model is built so that the outputs
    # list is never empty.
    if len(out_index) == 0:
        out_index.append(len(all_layers) - 1)
    model = Model(inputs=input_layer,
                  outputs=[all_layers[i] for i in out_index])
    print(model.summary())

    if args.tfsm:
        # Export a TF 1.x SavedModel with a detection signature. The
        # signature maps must contain TensorInfo protos, so every input and
        # output tensor is wrapped with build_tensor_info.
        yolo_model = YOLO(model=model)
        inputs = tf.saved_model.utils.build_tensor_info(input_layer)
        boxes, scores, classes = yolo_model.generate()
        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs={tf.saved_model.signature_constants.CLASSIFY_INPUTS: inputs},
            outputs={
                "boxes":
                    tf.saved_model.utils.build_tensor_info(boxes),
                tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                    tf.saved_model.utils.build_tensor_info(scores),
                tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                    tf.saved_model.utils.build_tensor_info(classes)
            },
            method_name="detect_objects")
        builder = tf.saved_model.builder.SavedModelBuilder(args.output_path)
        builder.add_meta_graph_and_variables(
            yolo_model.get_session(), [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    signature
            },
            main_op=tf.tables_initializer(),
            strip_default_attrs=True)
        builder.save()
    elif args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))

    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()

    print('Read {} of {} from Darknet weights.'.format(
        count, count + remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
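
# NOTE: a minimal sketch of the CLI parser that `_main` expects, provided
# for reference. The argument names below mirror the attributes `_main`
# accesses (config_path, weights_path, output_path, tfsm, weights_only,
# plot_model); the short flags and help strings are assumptions, not the
# original script's definitions.
def _build_arg_parser():
    import argparse  # local import keeps this sketch self-contained

    parser = argparse.ArgumentParser(
        description='Convert Darknet YOLO config and weights to a Keras model.')
    parser.add_argument('config_path', help='path to the Darknet .cfg file')
    parser.add_argument('weights_path', help='path to the Darknet .weights file')
    parser.add_argument('output_path',
                        help='output path; must end in .h5 unless --tfsm is set')
    parser.add_argument('--tfsm', action='store_true',
                        help='export a TensorFlow SavedModel instead of an .h5 file')
    parser.add_argument('-w', '--weights_only', action='store_true',
                        help='save only the model weights, not the full model')
    parser.add_argument('-p', '--plot_model', action='store_true',
                        help='save a plot of the generated Keras model as a PNG')
    return parser
# Example usage: _main(_build_arg_parser().parse_args())
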
yolo_m1 = YOLO()
# yolo_m2 = YOLO()

# set model paths
yolo_m1.model_path = 'model_data/final_model.h5'
# yolo_m2.model_path = 'model_data/model_2/with_6_categories.h5'

# set anchors and txt_paths
yolo_m1.anchors_path = 'model_data/new_anchors.txt'
yolo_m1.classes_path = 'model_data/new_classes.txt'
yolo_m1.class_names = yolo_m1._get_class()
# yolo_m2.anchors_path = 'model_data/model_2/txts/anchors.txt'
# yolo_m2.classes_path = 'model_data/model_2/txts/classes.txt'
yolo_m1.anchors = yolo_m1._get_anchors()

# load models in memory
yolo_m1.boxes, yolo_m1.scores, yolo_m1.classes = yolo_m1.generate()
print("model 1 loaded successfully...")
# yolo_m2.boxes, yolo_m2.scores, yolo_m2.classes = yolo_m2.generate()
# print("model 2 loaded successfully...")


def detection_function():
    om = ''
    predictions = {}
    directory = yolo_m1.test_images_directory
    files = os.listdir(directory)
    for image_name in files:
        try:
            image_for_model1 = Image.open(directory + image_name)
            # image_for_model2 = Image.open(directory+image_name)