Example #1
def create_callbacks(saved_weights_name, tensorboard_logs):
    makedirs(tensorboard_logs)

    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=4,
                               mode='min',
                               verbose=1)
    checkpoint = ModelCheckpoint(saved_weights_name,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 mode='min',
                                 period=1)
    reduce_on_plateau = ReduceLROnPlateau(monitor='val_loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.0001,
                                          cooldown=0,
                                          min_lr=0)
    tensorboard = TensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    return [early_stop, checkpoint, reduce_on_plateau, tensorboard]
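Every example on this page calls a project-local makedirs helper imported from the project's utils module rather than os.makedirs directly. A minimal sketch of such a helper, assuming the argument is a directory path (a few examples pass file paths, in which case the real helper presumably creates os.path.dirname(path) instead) and including the optional raise_error flag used in Example #19 below:

import os


def makedirs(path, raise_error=False):
    # Create the directory (and any missing parents). By default an
    # already-existing directory is silently accepted.
    try:
        os.makedirs(path)
    except OSError:
        if raise_error or not os.path.isdir(path):
            raise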
Example #2
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
    makedirs(tensorboard_logs)

    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=7,
                               mode='min',
                               verbose=1)
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name,  # + '{epoch:02d}.h5', 
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.01,
                                          cooldown=0,
                                          min_lr=0)
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    return [early_stop, checkpoint, reduce_on_plateau, tensorboard]
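The CustomModelCheckpoint used from Example #2 onward is project-specific and not shown on this page. A plausible minimal version, assuming it behaves like Keras' stock ModelCheckpoint but serializes a separate model_to_save (a common trick when training a multi-GPU wrapper while checkpointing the underlying single-GPU model):

from keras.callbacks import ModelCheckpoint


class CustomModelCheckpoint(ModelCheckpoint):
    # Checkpoints `model_to_save` instead of the model being trained.
    def __init__(self, model_to_save, **kwargs):
        super(CustomModelCheckpoint, self).__init__(**kwargs)
        self.model_to_save = model_to_save

    def on_epoch_end(self, epoch, logs=None):
        # Temporarily swap in the model we actually want serialized,
        # then let the stock callback do the saving.
        model = self.model
        self.model = self.model_to_save
        super(CustomModelCheckpoint, self).on_epoch_end(epoch, logs)
        self.model = model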
Example #3
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
    makedirs(tensorboard_logs)

    early_stop = EarlyStopping(  # early stopping
        monitor='loss',  # watch the training loss
        min_delta=0.01,  # an improvement smaller than 0.01 does not count as a real improvement
        patience=5,  # stop after 5 epochs without improvement
        mode='min',  # we monitor a loss, so smaller is better
        verbose=1  # print a message when stopping; the default, 0, is silent
    )
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name,  # + '{epoch:02d}.h5', 
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    reduce_on_plateau = ReduceLROnPlateau(  # reduce the learning rate when the loss stops improving
        monitor='loss',  # watch the training loss
        factor=0.1,  # factor by which the learning rate is reduced
        patience=2,  # wait 2 epochs without improvement
        verbose=1,  # 0 = quiet, 1 = print update messages
        mode='min',  # we monitor a loss, so smaller is better
        epsilon=0.01,  # minimum change that counts as an improvement
        cooldown=0,  # number of epochs to wait before resuming normal operation after the lr has been reduced
        min_lr=0  # lower bound on the learning rate
    )
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    return [early_stop, checkpoint, reduce_on_plateau, tensorboard]
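For context, a hedged sketch of how such a callback factory is consumed; train_model, infer_model and train_generator are assumed to come from a training script like Example #28 below:

# Illustrative wiring only.
callbacks = create_callbacks(saved_weights_name='yolo3_weights.h5',
                             tensorboard_logs='logs/',
                             model_to_save=infer_model)
train_model.fit_generator(generator=train_generator,
                          epochs=100,
                          callbacks=callbacks)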
Example #4
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
    makedirs(tensorboard_logs)
    
    early_stop = EarlyStopping(
        monitor     = 'loss', 
        min_delta   = 0.01, 
        patience    = 5, 
        mode        = 'min', 
        verbose     = 1
    )
    checkpoint = CustomModelCheckpoint(
        model_to_save   = model_to_save,
        filepath        = saved_weights_name,# + '{epoch:02d}.h5', 
        monitor         = 'loss', 
        verbose         = 1, 
        save_best_only  = True, 
        mode            = 'min', 
        period          = 1
    )
    reduce_on_plateau = ReduceLROnPlateau(
        monitor  = 'loss',
        factor   = 0.1,
        patience = 2,
        verbose  = 1,
        mode     = 'min',
        epsilon  = 0.01,
        cooldown = 0,
        min_lr   = 0
    )
    tensorboard = CustomTensorBoard(
        log_dir                = tensorboard_logs,
        write_graph            = True,
        write_images           = True,
    )    
    return [early_stop, checkpoint, reduce_on_plateau, tensorboard]
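CustomTensorBoard is likewise project-specific. A minimal sketch, assuming it extends Keras' TensorBoard callback to also record the optimizer's current learning rate (the LRTensorBoard in Example #14 below presumably follows the same pattern):

import keras.backend as K
from keras.callbacks import TensorBoard


class CustomTensorBoard(TensorBoard):
    # Stock TensorBoard logging plus the current learning rate.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super(CustomTensorBoard, self).on_epoch_end(epoch, logs)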
Example #5
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.35, 0.35

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################
    image_paths = []

    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]

    image_paths = [
        inp_file for inp_file in image_paths
        if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
    ]

    # the main loop
    for image_path in image_paths:
        image = cv2.imread(image_path)
        print(image_path)

        # predict the bounding boxes
        boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                               config['model']['anchors'], obj_thresh,
                               nms_thresh)[0]

        filename = output_path + image_path.split('/')[-1] + ".pkl"
        outfile = open(filename, 'wb')
        pickle.dump(boxes, outfile)
        outfile.close()

        # draw bounding boxes on the image using labels
        draw_boxes(image, boxes, config['model']['labels'], obj_thresh)

        # write the image with bounding boxes to file
        cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))
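The .pkl files written by the loop above can be read back later. A short sketch, assuming the pickled objects are the box instances produced by get_yolo_boxes (their BoundBox class must be importable when unpickling; the attribute names match those used in Example #22):

import pickle

# Hypothetical path: one .pkl per input image, as written above.
with open('output/example.jpg.pkl', 'rb') as infile:
    boxes = pickle.load(infile)

for box in boxes:
    print(box.xmin, box.ymin, box.xmax, box.ymax, box.get_score())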
Example #6
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
    makedirs(tensorboard_logs)

    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=7,
                               mode='min',
                               verbose=1)
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name,  # + '{epoch:02d}.h5', 
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.01,
                                          cooldown=0,
                                          min_lr=0)
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    # Learning rate reduce
    reduce_on_plateau = ReduceLROnPlateau(
        monitor="loss",  # val_accuracy, loss
        factor=0.1,
        patience=7,
        verbose=1,
        mode="min",
        cooldown=0,
        min_lr=0,
        min_delta=0.01,
    )

    # Tensorboard log callback
    tb_cb = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_logs,
                                           histogram_freq=0,
                                           write_graph=True,
                                           profile_batch=0)

    # Model checkpoint callback
    checkpoint_cb = ModelCheckpoint(saved_weights_name,
                                    monitor="loss",
                                    verbose=1,
                                    save_best_only=True,
                                    save_weights_only=True,
                                    mode="min",
                                    period=1)

    return [reduce_on_plateau, checkpoint_cb]
Example #7
    def train_data(self, gen, total=10000):

        weights_path = os.path.join(self.tuner_dir, 'final-weights.pkl')
        if os.path.isfile(weights_path):
            with open(weights_path, 'rb') as fl:
                res = pickle.load(fl)
                logger.log_info('loaded weights from', weights_path,
                                highlight=4)
                return res

        logger.start()
        iodata_path = os.path.join(self.tuner_dir, 'iodata.pkl')
        if os.path.isfile(iodata_path):
            with open(iodata_path, 'rb') as fl:
                X, Y = pickle.load(fl)
                logger.log_info('loaded io data from', iodata_path,
                                highlight=4)
        else:
            logger.log_info('constructing iodata', highlight=4)
            X = []
            Y = []
            sz = 0
            last = None
            gen = take_first_n(gen, total)
            # chunksize = (total + 4 * NUM_THREADS - 1) // (4 * NUM_THREADS)
            if NUM_THREADS > 1:
                with closing(multiprocessing.Pool(NUM_THREADS, maxtasksperchild=4)) as pool:
                    for i, (x, y) in tqdm(enumerate(pool.imap(
                            self.get_data, gen)), total=total):
                        X.append(x)
                        Y.append(y)
                        sz += len(x)
                        if last is None or sz > last + 50000:
                            logger.log_debug('%d/%d' % (i + 1, total), "with",
                                             sz, "examples so far..")
                            last = sz
                    pool.close()
                    pool.join()
            else:
                for i, (x, y) in tqdm(enumerate(map(
                        self.get_data, gen)), total=total):
                    X.append(x)
                    Y.append(y)
                    sz += len(x)
                    if last is None or sz > last + 50000:
                        logger.log_debug('%d/%d' % (i + 1, total), "with",
                                         sz, "examples so far..")
                        last = sz
            X = np.concatenate(X, axis=0)
            Y = np.concatenate(Y, axis=0)
            makedirs(iodata_path)
            with open(iodata_path, 'wb') as fl:
                pickle.dump((X, Y), fl)
                logger.log_info('dumping io data into', iodata_path,
                                highlight=4)
        assert self.fix_delimiters_only, "The possibilities are not handled for non delimiters"
        return self.fit(X, Y)
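train_data above relies on a take_first_n utility that is not shown. A minimal sketch, assuming it simply truncates a (possibly unbounded) generator:

import itertools


def take_first_n(gen, n):
    # Yield at most the first n items of the generator.
    return itertools.islice(gen, n)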
Example #8
    def predict(self, pic):
        ret = []

        with open(self.config_path) as config_buffer:
            config = json.load(config_buffer)

        makedirs(self.output_path)

        ###############################
        #   Set some parameters
        ###############################
        net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
        obj_thresh, nms_thresh = 0.7, 0.45

        ###############################
        #   Load the model
        ###############################
        os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
        infer_model = load_model(config['train']['saved_weights_name'])

        image_paths = []
        if os.path.isdir(self.input_path):
            for inp_file in os.listdir(self.input_path):
                image_paths += [self.input_path + inp_file]
        else:
            image_paths += [self.input_path]

        image_paths = [
            inp_file for inp_file in image_paths
            if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
        ]

        # the main loop
        # for image_path in image_paths:
        image = cv2.imread(pic)
        print(pic)

        # predict the bounding boxes
        boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                               config['model']['anchors'], obj_thresh,
                               nms_thresh)[0]
        for box in boxes:
            for i in range(len(config['model']['labels'])):
                if box.classes[i] > obj_thresh:
                    ret.append([config['model']['labels'][i], box.classes[i]])
                    print(config['model']['labels'][i], " ", box.classes[i])

        # draw bounding boxes on the image using labels
        draw_boxes(image, boxes, config['model']['labels'], obj_thresh)

        # write the image with bounding boxes to file
        print("file save to {}".format(self.output_path + pic.split('/')[-1]))
        cv2.imwrite(self.output_path + pic.split('/')[-1], np.uint8(image))

        return ret
Example #9
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save, valid_generator, config):
    makedirs(tensorboard_logs)
    
    early_stop = EarlyStopping(
        monitor     = 'loss', 
        min_delta   = 0.01, 
        patience    = 5, 
        mode        = 'min', 
        verbose     = 1
    )

    checkpoint = CustomModelCheckpoint(
        model_to_save   = model_to_save,
        filepath        = saved_weights_name + '-epoch_{epoch:02d}-val_loss_{val_loss:.04f}.h5',
        monitor         = 'val_loss',
        verbose         = 1, 
        save_best_only  = True, 
        mode            = 'min', 
        period          = 1
    )

    my_checkpoint = My_Checkpoint(
        model_to_save=model_to_save,
        valid_generator = valid_generator,
        config = config
    )

    only_one_checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name + '.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1
    )

    reduce_on_plateau = ReduceLROnPlateau(
        monitor  = 'loss',
        factor   = 0.1,
        patience = 2,
        verbose  = 1,
        mode     = 'min',
        epsilon  = 0.01,
        cooldown = 0,
        min_lr   = 0
    )
    # tensorboard = CustomTensorBoard(
    #     log_dir                = tensorboard_logs,
    #     write_graph            = True,
    #     write_images           = True,
    # )
    return [early_stop, checkpoint, only_one_checkpoint, reduce_on_plateau, my_checkpoint]
Example #10
def _main_(config_path, input_path, output_path):
    with open(config_path) as config_buffer:
        config = json.load(config_buffer)
    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(settings.BASE_DIR +
                             config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################

    # do detection on an image or a set of images
    image_paths = []

    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]
    image_paths = [
        inp_file for inp_file in image_paths
        if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
    ]
    # the main loop
    for image_path in image_paths:
        image = cv2.imread(image_path)
        # predict the bounding boxes
        boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                               config['model']['anchors'], obj_thresh,
                               nms_thresh)[0]

        # draw bounding boxes on the image using labels
        image_new, label_result = draw_boxes(image, boxes,
                                             config['model']['labels'],
                                             obj_thresh)
        # write the image with bounding boxes to file
        cv2.imwrite(output_path + image_path.split('/')[-1],
                    np.uint8(image_new))
        # note: returns after processing only the first image
        return label_result, output_path + image_path.split('/')[-1]
Example #11
    def fit(self, X, Y):

        weights_path = os.path.join(self.tuner_dir, 'final-weights.pkl')
        logger.log_debug(X.shape, Y.shape, np.unique(Y, return_counts=True))
        #logger.log_debug("\n", np.round(X.mean(axis=0), 5))
        #logger.log_debug("\n", np.round(X.std(axis=0), 5))
        msk_dels = (Y[:, 0] == 0) | (Y[:, 0] == 1)
        msk_adds = (Y[:, 0] == 2) | (Y[:, 0] == 3)

        X_adds = X[msk_adds]
        X_adds = X_adds[:, np.array([3, 4, 5]), ...]
        Y_adds = Y[msk_adds]
        Y_adds[Y_adds == 2] = 1
        Y_adds[Y_adds == 3] = 0
        combiner_adds = np.array(
            [[1, 0],
             [1, 0],
             [0, 1]])

        X_dels = X[msk_dels]
        X_dels = X_dels[:, np.array([0, 1, 2]), ...]
        Y_dels = Y[msk_dels]
        Y_dels[Y_dels == 0] = 1
        Y_dels[Y_dels == 1] = 0
        combiner_dels = np.array(
            [[1, 0],
             [1, 0],
             [0, 1]])

        kernel_dels, bias_dels = self.create_network(X_dels, Y_dels, combiner_dels)
        kernel_adds, bias_adds = self.create_network(X_adds, Y_adds, combiner_adds)

        weights = np.ones(X.shape[1:])
        bias = np.zeros(X.shape[1:])
        weights[np.array([0, 1, 2])] = kernel_dels
        bias[np.array([0, 1, 2])] = bias_dels

        weights[np.array([3, 4, 5])] = kernel_adds
        bias[np.array([3, 4, 5])] = bias_adds
        logger.log_debug("\n", np.round(weights, 3), "\n", np.round(bias, 3))

        #weights, bias = sparse.get_weights()
        makedirs(weights_path)
        with open(weights_path, 'wb') as fl:
            pickle.dump((weights, bias), fl)
            logger.log_info("logged weights into", weights_path, highlight=4)
        logger.log_full_report_into_file(weights_path)

        return weights, bias
Example #12
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    anchors = []
    with open('anchors.json') as anchors_str:
        anchors = json.load(anchors_str)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.4, 0.45

    image_paths = []

    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]

    image_paths = [
        inp_file for inp_file in image_paths
        if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
    ]

    infer_model = load_model(config['train']['saved_weights_name'])
    # the main loop
    for image_path in image_paths:
        image = cv2.imread(image_path)
        print(image_path)

        # predict the bounding boxes
        boxes = get_yolo_boxes(infer_model, [image], net_h, net_w, anchors,
                               obj_thresh, nms_thresh)[0]

        # draw bounding boxes on the image using labels
        draw_boxes(image, boxes, config['model']['labels'], obj_thresh)

        # write the image with bounding boxes to file
        cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))
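Unlike the other prediction scripts, Example #12 reads its anchors from a separate anchors.json instead of config['model']['anchors']. The file is assumed to hold a flat [width, height, width, height, ...] list; a sketch that writes one (the values are the standard YOLOv3 COCO anchors, used purely as an illustration):

import json

with open('anchors.json', 'w') as f:
    # Illustrative contents only: 9 anchor boxes as a flat [w, h, ...] list.
    json.dump([10, 13, 16, 30, 33, 23, 30, 61, 62, 45,
               59, 119, 116, 90, 156, 198, 373, 326], f)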
Example #13
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
    makedirs(tensorboard_logs)

    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=5,
                               mode='min',
                               verbose=1)
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath='log_voc/ep{epoch:03d}-loss{loss:.3f}.h5',  # saved_weights_name,  # + '{epoch:02d}.h5'
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    checkpoint2 = CustomModelCheckpoint2(
        model_to_save=model_to_save,
        filepath=saved_weights_name,  # + '{epoch:02d}.h5', 
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    checkpoint3 = ModelCheckpoint('log_voc/ep{epoch:03d}-loss{loss:.3f}.h5',
                                  monitor='loss',
                                  save_best_only=True,
                                  period=4)
    #checkpoint3 = ModelCheckpoint('log_voc/ep{epoch:03d}-loss{loss:.3f}.h5', monitor='loss', save_weights_only=True, save_best_only=True, period=4)
    #checkpoint2 not working yet for some reason... 1/15/2020

    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.01,
                                          cooldown=0,
                                          min_lr=0)
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    return [checkpoint, checkpoint2, tensorboard]
Example #14
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save):
    makedirs(tensorboard_logs)

    logging.debug("Adding callback (Early Stopping)")
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.1,
                               patience=8,
                               mode='min',
                               verbose=1,
                               restore_best_weights=True)
    logging.debug("Adding callback (CustomModelCheckpoint)")
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name,  # + '{epoch:02d}.h5', 
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    logging.debug("Adding callback (ReduceLROnPlateau)")
    reduce_on_plateau = ReduceLROnPlateau(monitor='val_loss',
                                          factor=0.2,
                                          patience=5,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.01,
                                          cooldown=3,
                                          min_lr=0.0)
    logging.debug("Adding callback (CustomTensorBoard)")
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )

    logging.debug("Adding callback (TerminateOnNaN)")
    nan = tf.keras.callbacks.TerminateOnNaN()

    logging.debug("Adding callback (LRTensorBoard)")
    lr = LRTensorBoard(log_dir=tensorboard_logs)

    return [early_stop, checkpoint, reduce_on_plateau, tensorboard, nan, lr]
Example #15
def create_callbacks(model_to_save, config, valid_generator):
    saved_weights_name = config['train']['saved_weights_name']
    tensorboard_logs = config['train']['tensorboard']["log_dir"]
    makedirs(tensorboard_logs)
    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=5,
                               mode='min',
                               verbose=1)
    checkpoint = CustomModelCheckpoint(
        model_to_save=model_to_save,
        filepath=saved_weights_name,  # + '{epoch:02d}.h5', 
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.01,
                                          cooldown=0,
                                          min_lr=0)
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    tensorboardImage = TensorBoardImage(
        tag=config["train"]["tensorboard"]["tag"],
        labels=config["model"]["labels"],
        infer_model=model_to_save,
        valid_generator=valid_generator,
        log_dir=tensorboard_logs,
        anchors=config["model"]["anchors"],
        img_count=config["train"]["tensorboard"]["img_count"])
    return [
        early_stop, checkpoint, reduce_on_plateau, tensorboard,
        tensorboardImage
    ]
Example #16
def rankGame(inputPath, draw_output=False):

    with open(configPath) as config_buffer:
        config = json.load(config_buffer)

    makedirs(outputPath)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(
        os.path.join(curDir, config['train']['saved_weights_name']))

    image = cv2.imread(inputPath)

    # predict the bounding boxes
    boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                           config['model']['anchors'], obj_thresh,
                           nms_thresh)[0]
    # Sort boxes by x position sort in ascending player order
    sortedBoxes = sorted(boxes, key=lambda box: box.xmin)

    # draw bounding boxes on the image using labels
    if draw_output:
        draw_boxes(image, boxes, config['model']['labels'], obj_thresh)
        # write the image with bounding boxes to file
        # cv2.imwrite(outputPath.split('/')[-1], np.uint8(image))
        cv2.imshow('Annotated Image', image)
        cv2.waitKey(0)
    # Assign ranks to the boxes based on their labels
    rankedBoxes = rankBoxes(image, boxes, config['model']['labels'],
                            obj_thresh)
    sortedRanks = [box.playerRank for box in rankedBoxes]
    return sortedRanks  # Returns a list of the integer ranks in order of ascending player number
Example #17
def create_callbacks(saved_weights_name, tensorboard_logs, model_to_save,
                     textedit, cure, loss):
    makedirs(tensorboard_logs)

    early_stop = EarlyStopping(monitor='loss',
                               min_delta=0.01,
                               patience=5,
                               mode='min',
                               verbose=1)
    checkpoint = CustomModelCheckpoint(
        textedit,
        cure,
        loss,
        model_to_save=model_to_save,
        filepath='YOLO3_{epoch:02d}.h5',  #saved_weights_name +, 
        monitor='loss',
        verbose=1,
        #save_best_only  = True,
        save_weights_only=False,
        mode='min',
        period=5)
    reduce_on_plateau = ReduceLROnPlateau(monitor='loss',
                                          factor=0.5,
                                          patience=8,
                                          verbose=1,
                                          mode='min',
                                          epsilon=0.01,
                                          cooldown=0,
                                          min_lr=0)
    tensorboard = CustomTensorBoard(
        log_dir=tensorboard_logs,
        write_graph=True,
        write_images=True,
    )
    return [  #early_stop, 
        checkpoint,
        #reduce_on_plateau,
        tensorboard
    ]
Example #18
def main(args):
    utils.makedirs(args.save_dir)
    if args.print_to_log:
        sys.stdout = open(f'{args.save_dir}/log.txt', 'w')

    t.manual_seed(seed)
    if t.cuda.is_available():
        t.cuda.manual_seed_all(seed)

    device = t.device('cuda' if t.cuda.is_available() else 'cpu')

    model_cls = F if args.uncond else CCF
    f = model_cls(args.depth, args.width, args.norm)
    print(f"loading model from {args.load_path}")

    # load em up
    ckpt_dict = t.load(args.load_path)
    f.load_state_dict(ckpt_dict["model_state_dict"])
    replay_buffer = ckpt_dict["replay_buffer"]

    f = f.to(device)

    if args.eval == "OOD":
        OODAUC(f, args, device)

    if args.eval == "test_clf":
        test_clf(f, args, device)

    if args.eval == "cond_samples":
        cond_samples(f, replay_buffer, args, device, args.fresh_samples)

    if args.eval == "uncond_samples":
        uncond_samples(f, args, device)

    if args.eval == "logp_hist":
        logp_hist(f, args, device)
Example #19
def main(args, config):
    if args.mode == "preprocess":
        from preprocess import preprocess

        try:
            utils.makedirs("data/", raise_error=True)
        except Exception:
            print(
                "Warning: data folder already exists. Some data may get overwritten"
            )
        print("Running preprocessing")
        preprocess(args, config)

    elif args.mode == "train":
        try:
            utils.makedirs(args.output_folder, raise_error=True)
        except Exception:
            print(
                f"Warning: {args.output_folder} already exists. Some data may get overwritten"
            )
        shutil.rmtree(f"{args.output_folder}/src", ignore_errors=True)
        shutil.copytree("src/", f"{args.output_folder}/src")

        shutil.copy(args.config_file, args.output_folder)
        train(args, config)

    elif args.mode == "test":
        if args.model_file is None or args.test_file is None:
            print("Error: Provide model_file and test_file file")
            sys.exit(1)

        try:
            utils.makedirs(args.output_folder, raise_error=True)
        except Exception:
            print(
                f"Warning: {args.output_folder} already exists. Some data may get overwritten"
            )
        test(args, config)
Example #20
    def save_model(self, save_path=None):
        if save_path is None:
            save_path = self.model_save_path
        makedirs(save_path)
        self.model.save(save_path)
        logger.log_info(save_path, 'saved..')
Example #21
def _main_(args):
    config_path  = args.conf
    input_path   = args.input
    output_path  = args.output

    with open(config_path) as config_buffer:    
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################       
    net_h, net_w = 416, 416 # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes 
    ###############################
    if 'webcam' in input_path: # do detection on the first webcam
        video_reader = cv2.VideoCapture(0)

        # the main loop
        batch_size  = 1
        images      = []
        while True:
            ret_val, image = video_reader.read()
            if ret_val == True: images += [image]

            if (len(images)==batch_size) or (ret_val==False and len(images)>0):
                batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)

                for i in range(len(images)):
                    draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh) 
                    cv2.imshow('video with bboxes', images[i])
                images = []
            if cv2.waitKey(1) == 27: 
                break  # esc to quit
        cv2.destroyAllWindows()        
    elif input_path[-4:] == '.mp4': # do detection on a video  
        video_out = output_path + input_path.split('/')[-1]
        video_reader = cv2.VideoCapture(input_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                               cv2.VideoWriter_fourcc(*'MPEG'), 
                               50.0, 
                               (frame_w, frame_h))
        # the main loop
        batch_size  = 1
        images      = []
        start_point = 0 #%
        show_window = False
        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            if (float(i+1)/nb_frames) > start_point/100.:
                images += [image]

                if (i%batch_size == 0) or (i == (nb_frames-1) and len(images) > 0):
                    # predict the bounding boxes
                    batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)

                    for i in range(len(images)):
                        # draw bounding boxes on the image using labels
                        draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh)   

                        # show the video with detection bounding boxes          
                        if show_window: cv2.imshow('video with bboxes', images[i])  

                        # write result to the output video
                        video_writer.write(images[i]) 
                    images = []
                if show_window and cv2.waitKey(1) == 27: break  # esc to quit

        if show_window: cv2.destroyAllWindows()
        video_reader.release()
        video_writer.release()       
    else: # do detection on an image or a set of images
        image_paths = []

        if os.path.isdir(input_path): 
            for inp_file in os.listdir(input_path):
                image_paths += [input_path + inp_file]
        else:
            image_paths += [input_path]

        image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])]

        # the main loop
        for image_path in image_paths:
            image = cv2.imread(image_path)
            print(image_path)

            # predict the bounding boxes
            boxes = get_yolo_boxes(infer_model, [image], net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)[0]

            # draw bounding boxes on the image using labels
            draw_boxes(image, boxes, config['model']['labels'], obj_thresh) 
     
            # write the image with bounding boxes to file
            cv2.imwrite(output_path + image_path.split('/')[-1], np.uint8(image))         
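Note that the video branch above writes at a hard-coded 50.0 fps regardless of the source clip. A small variant that preserves the input frame rate instead (file names are illustrative):

import cv2

video_reader = cv2.VideoCapture('input.mp4')
fps = video_reader.get(cv2.CAP_PROP_FPS) or 50.0  # fall back when the container reports 0
frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_writer = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'MPEG'),
                               fps, (frame_w, frame_h))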
Example #22
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################
    if 'webcam' in input_path:  # do detection on the first webcam
        video_reader = cv2.VideoCapture(0)

        # the main loop
        batch_size = 1
        images = []
        while True:
            ret_val, image = video_reader.read()
            if ret_val == True: images += [image]

            if (len(images) == batch_size) or (ret_val == False
                                               and len(images) > 0):
                batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w,
                                             config['model']['anchors'],
                                             obj_thresh, nms_thresh)

                for i in range(len(images)):
                    draw_boxes(images[i], batch_boxes[i],
                               config['model']['labels'], obj_thresh)
                    cv2.imshow('video with bboxes', images[i])
                images = []
            if cv2.waitKey(1) == 27:
                break  # esc to quit
        cv2.destroyAllWindows()
    elif input_path[-4:] == '.mp4' or input_path[
            -4:] == '.AVI':  # do detection on a video
        video_out = output_path + input_path.split('/')[-1].replace(
            '.AVI', '.mp4')
        video_reader = cv2.VideoCapture(input_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(
            video_out,
            cv2.VideoWriter_fourcc(*'MP4V'),  #(*'MPEG'), 
            50.0,
            (frame_w, frame_h))
        # the main loop
        batch_size = 1
        images = []
        start_point = 0  #%
        show_window = False
        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            if (float(i + 1) / nb_frames) > start_point / 100.:
                images += [image]

                if (i % batch_size == 0) or (i == (nb_frames - 1)
                                             and len(images) > 0):
                    # predict the bounding boxes
                    batch_boxes = get_yolo_boxes(infer_model, images, net_h,
                                                 net_w,
                                                 config['model']['anchors'],
                                                 obj_thresh, nms_thresh)

                    for i in range(len(images)):
                        bbox0 = [batch_boxes[i][0]] if len(
                            batch_boxes[i]) else []
                        # draw bounding boxes on the image using labels
                        #draw_boxes(images[i], batch_boxes[i], config['model']['labels'], obj_thresh)
                        draw_boxes(images[i], bbox0, config['model']['labels'],
                                   obj_thresh)  # take only 1st bbox

                        # show the video with detection bounding boxes
                        if show_window:
                            cv2.imshow('video with bboxes', images[i])

                        # write result to the output video
                        video_writer.write(images[i])
                    images = []
                if show_window and cv2.waitKey(1) == 27: break  # esc to quit

        if show_window: cv2.destroyAllWindows()
        video_reader.release()
        video_writer.release()
    else:  # do detection on an image or a set of images
        image_paths = []

        if os.path.isdir(input_path):
            for inp_file in os.listdir(input_path):
                image_paths += [input_path + inp_file]
        else:
            image_paths += [input_path]

        image_paths = [
            inp_file for inp_file in image_paths
            if (inp_file[-4:] in ['.jpg', '.png', 'JPEG', '.JPG'])
        ]

        # the main loop
        for image_path in image_paths:
            #image_path = '/dataset/RZSS_images/1_animal_empty_r/animal/6.JPG'
            image = cv2.imread(image_path)

            # predict the bounding boxes
            boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                                   config['model']['anchors'], obj_thresh,
                                   nms_thresh)[0]
            if len(boxes) > 0:
                pboxes = np.array(
                    [[box.xmin, box.ymin, box.xmax, box.ymax,
                      box.get_score()] for box in boxes])
                print(pboxes)  # only defined when at least one box was found
            #print(boxes[0].xmin, boxes[0].ymin, boxes[0].xmax, boxes[0].ymax, boxes[0].c, boxes[0].classes );
            #for k in range(len(boxes)): print(boxes[k].__dict__); import sys; sys.exit(0)

            # draw bounding boxes on the image using labels
            draw_boxes(image, boxes, config['model']['labels'], obj_thresh)
            #import sys; sys.exit(0)
            # write the image with bounding boxes to file
            cv2.imwrite(output_path + image_path.split('/')[-1],
                        np.uint8(image))
            print('OUTPUT SAVED AS ' + output_path + image_path.split('/')[-1])
Example #23
import json
import cv2
from utils.utils import get_yolo_boxes, makedirs
from utils.bbox import draw_boxes
from keras.models import load_model
from tqdm import tqdm
import numpy as np
import os


def _main_(args):
    config_path  = args.conf
    input_path   = args.input
    output_path  = args.output

    with open(config_path) as config_buffer:    
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################       
    net_h, net_w = 416, 416 # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes 
Example #24
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    downsample = 32  # ratio between network input's size and network output's size, 32 for YOLOv3

    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster

    # this only works for squared images
    if config['model']['min_input_size'] == config['model']['max_input_size']:
        net_w = config['model']['min_input_size'] // downsample * downsample
        net_h = config['model']['min_input_size'] // downsample * downsample

    obj_thresh = config['train']['ignore_thresh']

    nms_thresh = 0.45

    if config['valid']['duplicate_thresh']:
        nms_thresh = config['valid']['duplicate_thresh']

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################
    if 'webcam' in input_path:  # do detection on the first webcam
        video_reader = cv2.VideoCapture(0)

        # the main loop
        batch_size = 1
        images = []
        while True:
            ret_val, image = video_reader.read()
            if ret_val == True: images += [image]

            if (len(images) == batch_size) or (ret_val == False
                                               and len(images) > 0):
                batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w,
                                             config['model']['anchors'],
                                             obj_thresh, nms_thresh)

                for i in range(len(images)):
                    draw_boxes(images[i], batch_boxes[i],
                               config['model']['labels'], obj_thresh)
                    cv2.imshow('video with bboxes', images[i])
                images = []
            if cv2.waitKey(1) == 27:
                break  # esc to quit
        cv2.destroyAllWindows()
    elif input_path[-4:] == '.mp4':  # do detection on a video
        video_out = output_path + input_path.split('/')[-1]
        video_reader = cv2.VideoCapture(input_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'), 50.0,
                                       (frame_w, frame_h))
        # the main loop
        batch_size = 1
        images = []
        start_point = 0  #%
        show_window = False
        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            if (float(i + 1) / nb_frames) > start_point / 100.:
                images += [image]

                if (i % batch_size == 0) or (i == (nb_frames - 1)
                                             and len(images) > 0):
                    # predict the bounding boxes
                    batch_boxes = get_yolo_boxes(infer_model, images, net_h,
                                                 net_w,
                                                 config['model']['anchors'],
                                                 obj_thresh, nms_thresh)

                    for i in range(len(images)):
                        # draw bounding boxes on the image using labels
                        draw_boxes(images[i], batch_boxes[i],
                                   config['model']['labels'], obj_thresh)

                        # show the video with detection bounding boxes
                        if show_window:
                            cv2.imshow('video with bboxes', images[i])

                        # write result to the output video
                        video_writer.write(images[i])
                    images = []
                if show_window and cv2.waitKey(1) == 27: break  # esc to quit

        if show_window: cv2.destroyAllWindows()
        video_reader.release()
        video_writer.release()
    else:  # do detection on an image or a set of images
        image_paths = []

        if os.path.isdir(input_path):
            for inp_file in os.listdir(input_path):
                image_paths += [input_path + inp_file]
        else:
            image_paths += [input_path]

        image_paths = [
            inp_file for inp_file in image_paths
            if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
        ]

        # the main loop
        for image_i, image_path in enumerate(image_paths):
            image = cv2.imread(image_path)

            if image_i > 0 and image_i % 50 == 0:
                print(
                    'predicted {:4} images out of {:4} images in total'.format(
                        image_i, len(image_paths)))

            # predict the bounding boxes
            boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                                   config['model']['anchors'], obj_thresh,
                                   nms_thresh)[0]

            # draw bounding boxes on the image using labels
            draw_boxes(image, boxes, config['model']['labels'], obj_thresh)

            # write the image with bounding boxes to file
            cv2.imwrite(output_path + image_path.split('/')[-1],
                        np.uint8(image))

        print('predicted all {:4} images'.format(len(image_paths)))
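Example #24 above rounds the configured input size down to a multiple of the network stride with the // then * idiom. A quick worked instance:

downsample = 32       # YOLOv3 stride between input and output grids
min_input_size = 420  # illustrative configured size
net = min_input_size // downsample * downsample
print(net)            # 416, the nearest multiple of 32 not above 420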
Example #25
# training hyperparameters
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--epochs', type=int, default=300)

# others
parser.add_argument('--seed', type=int, default=1)

args = parser.parse_args()

args.work_dir = osp.dirname(osp.realpath(__file__))
args.data_fp = osp.join(args.work_dir, 'data', args.dataset)
args.out_dir = osp.join(args.work_dir, 'out', args.exp_name)
args.checkpoints_dir = osp.join(args.out_dir, 'checkpoints')
print(args)

utils.makedirs(args.out_dir)
utils.makedirs(args.checkpoints_dir)

writer = writer.Writer(args)
device = torch.device('cuda', args.device_idx)
torch.set_num_threads(args.n_threads)

# deterministic
torch.manual_seed(args.seed)
cudnn.benchmark = False
cudnn.deterministic = True

# load dataset
template_fp = osp.join('template', 'template.obj')
meshdata = MeshData(args.data_fp,
                    template_fp,
Example #26
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################
    image_paths = []

    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]

    image_paths = sorted([
        inp_file for inp_file in image_paths
        if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
    ])

    # the main loop
    with open(os.path.join(output_path, "ans.xml"), "w") as file:
        file.write("<annotations>\n")
        for image_path in image_paths:
            image = cv2.imread(image_path)
            print(image_path)

            # predict the bounding boxes
            boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                                   config['model']['anchors'], obj_thresh,
                                   nms_thresh)[0]

            # draw bounding boxes on the image using labels
            draw_boxes(image, boxes, config['model']['labels'], obj_thresh)

            filename = os.path.basename(image_path)
            write_boxes(file, filename, image, boxes,
                        config['model']['labels'], obj_thresh)

            # write the image with bounding boxes to file
            cv2.imwrite(output_path + image_path.split('/')[-1],
                        np.uint8(image))
        file.write("</annotations>\n")
Example #27
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output
    predict_path = args.predict
    if_show = args.show

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)
    makedirs(predict_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 512, 512  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45  #0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])
    # infer_model = load_model('backend')
    print('load model')

    ###############################
    #   Predict bounding boxes
    ###############################

    # do detection on an image or a set of images
    image_paths = []

    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]

    image_paths = [
        inp_file for inp_file in image_paths if (inp_file[-4:] in ['.mhd'])
    ]

    # the main loop
    for image_path in image_paths:
        print(image_path)
        slice_i = 1
        while slice_i < 1000:
            slice_i += 1
            print('slice:' + str(slice_i))
            image = raw_reader(image_path, slice_i)

            if image is None:
                break

            if if_show:
                image_ini = image[..., 2]
                max_pix = np.max(image_ini)
                min_pix = np.min(image_ini)
                # print(max_pix, min_pix)
                image_ini, _ = img_windowing(image_ini, max_pix, min_pix)
                # cv2.imshow('image_ini', image_ini)
                # cv2.waitKey()
                # image_ini = np.uint8(np.float64((image_ini + 1000) / 1800) * 255)

            (imagename,
             extension) = os.path.splitext(image_path.split('/')[-1])

            # predict the bounding boxes
            boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                                   config['model']['anchors'], obj_thresh,
                                   nms_thresh, imagename)[0]

            line = ''

            textname = predict_path + imagename + '_' + str(slice_i) + '.txt'

            if if_show and len(boxes) > 0:
                # draw bounding boxes on the image using labels
                # print('boxes:' + str(len(boxes)))

                draw_boxes(
                    image_ini, boxes, sorted(config['model']['labels']),
                    obj_thresh,
                    output_path + imagename + '_' + str(slice_i) + '.jpg')

                # write the image with bounding boxes to file
                # cv2.imwrite(output_path + imagename + '_' + str(slice_i) + '.jpg', np.uint8(image_ini))

                newline = get_box_info(line, boxes,
                                       sorted(config['model']['labels']),
                                       obj_thresh)
                with open(textname, 'w') as f:
                    f.write(newline)
Example #28
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    train_ints, valid_ints, labels, max_box_per_image = create_training_instances(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'], config['train']['cache_name'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])
    print('\nTraining on: \t' + str(labels) + '\n')

    ###############################
    #   Create the generators
    ###############################
    train_generator = BatchGenerator(
        instances=train_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.3,
        norm=normalize)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Create the model
    ###############################
    if os.path.exists(config['train']['saved_weights_name']):
        config['train']['warmup_epochs'] = 0
    warmup_batches = config['train']['warmup_epochs'] * (
        config['train']['train_times'] * len(train_generator))

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))

    train_model, infer_model = create_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=max_box_per_image,
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=warmup_batches,
        ignore_thresh=config['train']['ignore_thresh'],
        multi_gpu=multi_gpu,
        saved_weights_name=config['train']['saved_weights_name'],
        lr=config['train']['learning_rate'],
        grid_scales=config['train']['grid_scales'],
        obj_scale=config['train']['obj_scale'],
        noobj_scale=config['train']['noobj_scale'],
        xywh_scale=config['train']['xywh_scale'],
        class_scale=config['train']['class_scale'],
    )

    ###############################
    #   Kick off the training
    ###############################
    callbacks = create_callbacks(config['train']['saved_weights_name'],
                                 config['train']['tensorboard_dir'],
                                 infer_model)

    history = train_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_generator) *
        config['train']['train_times'],  # (train_images / batch_size) * train_times
        epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        callbacks=callbacks,
        workers=4,
        max_queue_size=8,
        validation_data=valid_generator,
        validation_steps=len(valid_generator) * config['valid']['valid_times'])

    # Prepare folder to save in
    now = datetime.now()  # avoid shadowing the time module
    time_str = now.strftime("%Y%m%d-%H%M")  # no ':' so the folder name is also valid on Windows
    cwd = os.getcwd()
    save_path = os.path.join(cwd, time_str)
    print('Data saved in: ' + save_path)
    makedirs(save_path)

    # Copy and save config file
    save_config_path = os.path.join(save_path, 'config.json')
    shutil.copy2(config_path, save_config_path)

    # plotting train data and saving mat file with data
    plot_matlab(history, config['data']['plot_png'],
                config['data']['save_mat'], save_path)

    # Save the model summary to a .txt file
    save_template_path = os.path.join(save_path, 'train_model_params.txt')
    with open(save_template_path, 'w') as fh:
        # Pass the file handle in as a lambda function to make it callable
        train_model.summary(print_fn=lambda x: fh.write(x + '\n'))

    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Run the evaluation
    ###############################
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    print('========== VALIDATION ==========')
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
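For reference, a typical entry point that feeds `args` into `_main_` above. This wrapper is an assumption (only `-c/--conf` is actually read by `_main_`), mirroring the argparse pattern shown in Example #32:

import argparse

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='Train a YOLOv3 model')
    argparser.add_argument('-c', '--conf',
                           help='path to configuration file',
                           default='config.json')  # hypothetical default
    _main_(argparser.parse_args())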
Example #29
dataformate = date.strftime("%Y-%m-%d %H:%M:%S")  # production version
#day='20210331'
# In[root]
currentroot = 'D:/Camy/AI_project/CELL_ASM_SealWidth_SPC_N2/code'  # absolute path; use os.getcwd() for a relative one
os.chdir(currentroot)
#currentroot=os.getcwd()
down_img = currentroot + '/input/Downpic/'
input_img = currentroot + '/input/images/'
image_compare = currentroot + '/input/compare/'
output_img = currentroot + '/output/yoloimage/'  #/image/
output_csv = currentroot + '/output/csv/'  #/image/
#output_csv= currentroot+'/output/csv_data/'
#check_table=currentroot+'/CheckTable'
Models_path = currentroot + '/models/'
## Create the working folders
makedirs(down_img)
makedirs(input_img)
makedirs(image_compare)
makedirs(output_img)
makedirs(output_csv)
#makedirs(output_csv)
#makedirs(check_table)
# In[readConfig]
config = configparser.ConfigParser()
config.read('Config.ini', encoding='utf-8')  # config file name
ModelName = list(config['Setting']['model_Name'].split(','))
ModelH5File = list(config['Setting']['model_h5Nmae'].split(','))  # key spelled as in Config.ini
SheetModel_Modelname_dic = dict(zip(ModelName, ModelH5File))  # model name -> .h5 file
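A Config.ini consistent with the keys read above; the section name and keys come from the code, while the values are purely illustrative:

; hypothetical Config.ini (values are placeholders)
[Setting]
model_Name = SheetA,SheetB
model_h5Nmae = model_a.h5,model_b.h5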
# In[Set Model some parameter]
Example #30
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Predict bounding boxes
    ###############################
    train_data = pd.DataFrame(columns=[
        'clip_id', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9',
        'x10', 'x11', 'x12', 'x13', 'x14', 'y0', 'y1', 'y2', 'y3', 'y4', 'y5',
        'y6', 'y7', 'y8', 'y9', 'y10', 'y11', 'y12', 'y13', 'y14'
    ])
    vid_files = [
        f for f in os.listdir(input_path) if f[-4:] in [".avi", ".mp4"]
    ]

    for num, vid_file in enumerate(vid_files, start=1):
        print(
            "#############\nProcessing video No.{} of {} videos: {}\n#############"
            .format(num, len(vid_files), vid_file))
        start = time.time()

        video_out = output_path + '/traj_' + vid_file.split('/')[-1]
        video_reader = cv2.VideoCapture(input_path + vid_file)

        clip_id = int("".join(re.findall("[0-9]", vid_file)))

        video_fps = int(video_reader.get(cv2.CAP_PROP_FPS))
        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        # the main loop
        batch_size = 1
        images = []
        start_point = 0  # percentage of the video to skip before detection starts
        show_window = False
        balls_coords = []
        balls_frms = []
        labels = config['model']['labels']
        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            # stack all frames
            balls_frms.append(image)

            if (float(i + 1) / nb_frames) > start_point / 100.:
                images += [image]

                if (i % batch_size == 0) or (i == (nb_frames - 1)
                                             and len(images) > 0):
                    # predict the bounding boxes
                    batch_boxes = get_yolo_boxes(infer_model, images, net_h,
                                                 net_w,
                                                 config['model']['anchors'],
                                                 obj_thresh, nms_thresh)

                    for k in range(len(images)):
                        # stack all ball coords
                        coords = get_coord(images[k], batch_boxes[k], labels,
                                           obj_thresh, i)  # i: frame no
                        balls_coords = balls_coords + coords

                        # show the video after above operations
                        if show_window:
                            cv2.imshow('video with bboxes', images[k])

                    images = []
                if show_window and cv2.waitKey(1) == 27: break  # esc to quit

        # track trajectory
        x_output, y_output = pitch_predict(balls_coords, balls_frms, video_out,
                                           video_fps, frame_w, frame_h)

        record = [clip_id] + x_output + y_output
        if len(record) > 1:
            train_data.loc[len(train_data), :] = record  # append as a new row
            train_data.to_csv("output-0614-1.csv", index=False)

        if show_window: cv2.destroyAllWindows()
        video_reader.release()

        print('\n>> Time spent on video No.{}, {}: {:.1f} seconds.'.format(
            num, vid_file,
            time.time() - start))
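Since batch_size is fixed at 1 above, the buffer never holds more than one frame. A sketch of the same flush pattern generalized to a larger batch, keyed on buffer length as in the webcam branch of Example #31 (`process` is a placeholder for the per-frame work; the surrounding names come from the code above):

# assumes infer_model, video_reader, nb_frames, config, net_h/net_w, thresholds
batch_size = 8
images = []
for i in range(nb_frames):
    _, image = video_reader.read()
    images.append(image)
    # flush when the buffer is full, or at the last frame
    if len(images) == batch_size or i == nb_frames - 1:
        batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w,
                                     config['model']['anchors'],
                                     obj_thresh, nms_thresh)
        for k, img in enumerate(images):
            process(img, batch_boxes[k])  # placeholder
        images = []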
Example #31
def _main_(args):
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)

    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45

    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    infer_model = load_model(config['train']['saved_weights_name'])

    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights("model.h5")
    label_map = np.load('label_map.npy', allow_pickle=True).item()  # dict stored in a .npy file
    ###############################
    #   Predict bounding boxes
    ###############################
    if 'webcam' in input_path:  # do detection on the first webcam
        video_reader = cv2.VideoCapture(0)

        # the main loop
        batch_size = 1
        images = []
        while True:
            ret_val, image = video_reader.read()
            if ret_val == True:
                images += [image]

            if (len(images) == batch_size) or (ret_val == False
                                               and len(images) > 0):
                batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w,
                                             config['model']['anchors'],
                                             obj_thresh, nms_thresh)

                for i in range(len(images)):
                    draw_boxes(images[i], batch_boxes[i],
                               config['model']['labels'], obj_thresh)
                    cv2.imshow('video with bboxes', images[i])
                images = []
            if cv2.waitKey(1) == 27:
                break  # esc to quit
        cv2.destroyAllWindows()
    elif input_path[-4:] == '.mp4':  # do detection on a video
        video_out = output_path + input_path.split('/')[-1]
        video_reader = cv2.VideoCapture(input_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'), 50.0,
                                       (frame_w, frame_h))
        # the main loop
        batch_size = 1
        images = []
        start_point = 0  # percentage of the video to skip before detection starts
        show_window = False
        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            if (float(i + 1) / nb_frames) > start_point / 100.:
                images += [image]

                if (i % batch_size == 0) or (i == (nb_frames - 1)
                                             and len(images) > 0):
                    # predict the bounding boxes
                    batch_boxes = get_yolo_boxes(infer_model, images, net_h,
                                                 net_w,
                                                 config['model']['anchors'],
                                                 obj_thresh, nms_thresh)

                    for k in range(len(images)):  # k avoids shadowing the frame index i
                        # draw bounding boxes on the image using labels
                        draw_boxes(images[k], batch_boxes[k], loaded_model,
                                   label_map, config['model']['labels'],
                                   obj_thresh)

                        # show the video with detection bounding boxes
                        if show_window:
                            cv2.imshow('video with bboxes', images[k])

                        # write result to the output video
                        video_writer.write(images[k])
                    images = []
                if show_window and cv2.waitKey(1) == 27:
                    break  # esc to quit

        if show_window:
            cv2.destroyAllWindows()
        video_reader.release()
        video_writer.release()
    else:  # do detection on an image or a set of images
        image_paths = []

        if os.path.isdir(input_path):
            for inp_file in os.listdir(input_path):
                print(input_path + inp_file)
                image_paths += [input_path + inp_file]
        else:
            image_paths += [input_path]

        image_paths = [
            inp_file for inp_file in image_paths
            if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
        ]

        # the main loop
        for image_path in tqdm(image_paths):
            # print(image_path)
            image = cv2.imread(image_path)

            # predict the bounding boxes
            boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                                   config['model']['anchors'], obj_thresh,
                                   nms_thresh)[0]

            # draw bounding boxes on the image using labels
            draw_boxes(image, boxes, loaded_model, label_map,
                       sorted(config['model']['labels']), obj_thresh)

            # write the image with bounding boxes to file
            cv2.imwrite(output_path + image_path.split('/')[-1],
                        np.uint8(image))
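Unlike the webcam branch, the video and image branches here pass a second Keras model (`loaded_model`) and a `label_map` into `draw_boxes`, which suggests each detected box is cropped and re-classified. A sketch of that per-box step under that assumption; the crop size (64x64) and preprocessing are guesses:

import cv2
import numpy as np

def classify_box(image, box, loaded_model, label_map):
    # Hypothetical re-classification of a single detection crop.
    crop = image[max(box.ymin, 0):box.ymax, max(box.xmin, 0):box.xmax]
    crop = cv2.resize(crop, (64, 64)).astype(np.float32) / 255.0
    probs = loaded_model.predict(np.expand_dims(crop, axis=0))[0]
    return label_map[int(np.argmax(probs))]  # assumes index -> name mapping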
Example #32
def work(textedit, pic_label, input, model, output):
    input = input + "/"
    output = output + "/"
    argparser = argparse.ArgumentParser(
        description='Predict with a trained yolo model')
    argparser.add_argument('-c',
                           '--conf',
                           help='path to configuration file',
                           default='config1.json')
    argparser.add_argument(
        '-i',
        '--input',
        help='path to an image, a directory of images, a video, or webcam',
        default=input)
    argparser.add_argument('-o',
                           '--output',
                           default=output,
                           help='path to output directory')
    args = argparser.parse_args()
    #_main_(args)
    config_path = args.conf
    input_path = args.input
    output_path = args.output

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    makedirs(output_path)
    ###############################
    #   Set some parameters
    ###############################
    net_h, net_w = 416, 416  # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = 0.5, 0.45
    ###############################
    #   Load the model
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    #config['train']['saved_weights_name']
    keras.backend.clear_session()
    infer_model = load_model(config['train']['saved_weights_name'])
    ###############################
    #   Predict bounding boxes
    ###############################
    image_paths = []

    if os.path.isdir(input_path):
        for inp_file in os.listdir(input_path):
            image_paths += [input_path + inp_file]
    else:
        image_paths += [input_path]

    image_paths = [
        inp_file for inp_file in image_paths
        if (inp_file[-4:] in ['.jpg', '.png', 'JPEG'])
    ]
    # the main loop
    strideN = 208  # sliding-window stride; each tile is 2*strideN = 416 px, so adjacent tiles overlap 50%
    for image_path in image_paths:
        imageO = cv2.imread(image_path)
        print(image_path)
        image = QtGui.QPixmap(image_path)
        pic_label.setPixmap(image)
        pic_label.setScaledContents(True)

        (height, width, _) = imageO.shape
        mH = int((height - strideN) / strideN)  # number of tile rows
        mW = int((width - strideN) / strideN)  # number of tile columns

        #### Split the image into overlapping tiles and run detection on each (grid search)
        object_key = []
        object_pro = []
        object_x1 = []
        object_y1 = []
        object_x2 = []
        object_y2 = []

        for m in range(mH):
            for n in range(mW):
                print(m * mW + n)
                cursor = textedit.textCursor()
                cursor.movePosition(QtGui.QTextCursor.End)
                cursor.insertText("Detecting: " + str(m * mW + n))
                cursor.insertText("\r\n")
                # textedit.append('Elapsed time = {}'.format(time.time() - st))
                textedit.setTextCursor(cursor)
                textedit.ensureCursorVisible()
                flag = False
                '''
                cursor = textedit.textCursor()
                cursor.movePosition(QtGui.QTextCursor.End)
                cursor.insertText(str(m * mW + n))
                cursor.insertText("\r\n")
                '''
                imgCopy = imageO.copy()

                image = imgCopy[strideN * m:strideN * (m + 2),
                                strideN * n:strideN * (n + 2)]  # 416x416 tile: rows (height), then columns (width)

                # predict the bounding boxes
                boxes = get_yolo_boxes(infer_model, [image], net_h, net_w,
                                       config['model']['anchors'], obj_thresh,
                                       nms_thresh)[0]
                # draw bounding boxes on the image using labels
                #draw_boxes(image, boxes, config['model']['labels'], obj_thresh)
                # write the image with bounding boxes to file
                #aaa=image_path.split('/')
                #aab=aaa[-1].split('.')
                #cv2.imwrite(output_path + aab[0]  + '_' + str(m*mW+n+1) + '.' + aab[1], np.uint8(image))

                #### Store every detection box
                labels = config['model']['labels']
                quiet = True
                key = ''  # last matched class name for this box
                prob = 0.0  # and its confidence
                for box in boxes:
                    label_str = ''
                    label = -1

                    for i in range(len(labels)):
                        if box.classes[i] > obj_thresh:
                            if label_str != '': label_str += ', '
                            label_str += (
                                labels[i] + ' ' +
                                str(round(box.get_score() * 100, 2)) + '%'
                            )  # confidence as a percentage
                            key = labels[i]
                            prob = box.get_score()
                            label = i
                            if not quiet: print(label_str)
                    color = [0, 0, 255]
                    if key == "missile": color = [0, 0, 255]
                    if key == "oiltank": color = [0, 159, 255]
                    if key == "plane": color = [0, 255, 0]
                    if key == "warship": color = [255, 0, 0]

                    if label >= 0:
                        flag = True
                        text_size = cv2.getTextSize(label_str,
                                                    cv2.FONT_HERSHEY_SIMPLEX,
                                                    1.1e-3 * image.shape[0], 5)
                        text_w, text_h = text_size[0]  # avoid shadowing the image width/height
                        region = np.array(
                            [[box.xmin - 3, box.ymin],
                             [box.xmin - 3, box.ymin - text_h - 26],
                             [box.xmin + text_w + 13, box.ymin - text_h - 26],
                             [box.xmin + text_w + 13, box.ymin]],
                            dtype='int32')  # label background polygon (not used below)
                        cv2.rectangle(image, (box.xmin, box.ymin),
                                      (box.xmax, box.ymax),
                                      color,
                                      thickness=2)
                        aaa = image_path.split('/')
                        aab = aaa[-1].split('.')
                        cv2.imwrite(
                            output_path + aab[0] + '_' + str(m * mW + n + 1) +
                            '.' + aab[1], np.uint8(image))
                        print(output_path + aab[0] + '_' +
                              str(m * mW + n + 1) + '.' + aab[1])
                        #image = QtGui.QPixmap(output_path + aab[0]  + '_' + str(m*mW+n+1) + '.' + aab[1])
                        #pic_label.setPixmap(image)
                        #pic_label.setScaledContents(True)

                        object_real_x1 = box.xmin + strideN * n
                        object_real_y1 = box.ymin + strideN * m
                        object_real_x2 = box.xmax + strideN * n
                        object_real_y2 = box.ymax + strideN * m

                        object_key.append(key)
                        object_pro.append(prob)
                        object_x1.append(object_real_x1)
                        object_y1.append(object_real_y1)
                        object_x2.append(object_real_x2)
                        object_y2.append(object_real_y2)
                if flag:
                    flag = False
                    aaa = image_path.split('/')
                    aab = aaa[-1].split('.')
                    image = QtGui.QPixmap(output_path + aab[0] + '_' +
                                          str(m * mW + n + 1) + '.' + aab[1])
                    pic_label.setPixmap(image)
                    pic_label.setScaledContents(True)

        ## Non-maximum suppression over all tile-level detections
        imgCopy2 = imageO.copy()
        object_name = ["missile", "oiltank", "plane", "warship"]
        for object_class in range(len(object_name)):
            x1 = []
            y1 = []
            x2 = []
            y2 = []
            prob = []
            for numR in range(len(object_key)):
                if object_key[numR] == object_name[object_class]:
                    x1.append(object_x1[numR])
                    y1.append(object_y1[numR])
                    x2.append(object_x2[numR])
                    y2.append(object_y2[numR])
                    prob.append(object_pro[numR])
            if len(x1) > 0:
                x1 = np.array(x1)
                y1 = np.array(y1)
                x2 = np.array(x2)
                y2 = np.array(y2)
                prob = np.array(prob)

                x1, y1, x2, y2, probs = non_max_suppression(x1,
                                                            y1,
                                                            x2,
                                                            y2,
                                                            prob,
                                                            overlap_thresh=0.5,
                                                            max_boxes=30)

                for numLR in range(len(x1)):
                    real_x1 = x1[numLR]
                    real_y1 = y1[numLR]
                    real_x2 = x2[numLR]
                    real_y2 = y2[numLR]

                    color = [0, 0, 255]
                    if object_name[object_class] == "missile":
                        color = [0, 0, 255]
                    if object_name[object_class] == "oiltank":
                        color = [0, 159, 255]
                    if object_name[object_class] == "plane":
                        color = [0, 255, 0]
                    if object_name[object_class] == "warship":
                        color = [255, 0, 0]
                    cv2.rectangle(imgCopy2, (real_x1, real_y1),
                                  (real_x2, real_y2), color, 2)
        cv2.imwrite(output_path + image_path.split('/')[-1], imgCopy2)
        image = QtGui.QPixmap(output_path + image_path.split('/')[-1])
        pic_label.setPixmap(image)
        pic_label.setScaledContents(True)
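The `non_max_suppression` helper above takes parallel coordinate arrays and returns filtered arrays. A minimal sketch with that call signature, implementing standard greedy IoU-based NMS (not necessarily the author's exact version):

import numpy as np

def non_max_suppression(x1, y1, x2, y2, prob,
                        overlap_thresh=0.5, max_boxes=30):
    idxs = np.argsort(prob)[::-1]  # highest confidence first
    areas = (x2 - x1) * (y2 - y1)
    keep = []
    while len(idxs) > 0 and len(keep) < max_boxes:
        i = idxs[0]
        keep.append(i)
        # overlap of the best box with every remaining box
        xx1 = np.maximum(x1[i], x1[idxs[1:]])
        yy1 = np.maximum(y1[i], y1[idxs[1:]])
        xx2 = np.minimum(x2[i], x2[idxs[1:]])
        yy2 = np.minimum(y2[i], y2[idxs[1:]])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        iou = inter / (areas[i] + areas[idxs[1:]] - inter + 1e-9)
        idxs = idxs[1:][iou <= overlap_thresh]  # drop boxes that overlap too much
    keep = np.array(keep, dtype=int)
    return x1[keep], y1[keep], x2[keep], y2[keep], prob[keep]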