Example #1
def main():

    # Load config
    config = get_config_from_json('config/model.config')

    # Load data
    X_train, Y_train, X_CV, Y_CV, X_test, Y_test = load_data(0.18)

    # Generate dataset
    test_dataset = MyDataset(X_test, Y_test)

    # Create Data Loaders
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config.model.batch_size,
                             shuffle=False)

    # Build model
    model = CNNModel()
    model = model.double()
    model.eval()

    if os.path.isfile(config.model.path):
        model.load_state_dict(torch.load(config.model.path))
        print('Loaded checkpoint..')
    else:
        print('checkpoint not found..')

    evaluate(model, test_loader)
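
The evaluate helper called above is not shown in the snippet. As a point of reference, here is a minimal sketch of what such a routine often looks like for a classifier; every name and detail below is an assumption, not the snippet's actual code:

import torch

def evaluate(model, test_loader):
    model.eval()
    correct = total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, targets in test_loader:
            outputs = model(inputs)
            preds = outputs.argmax(dim=1)
            correct += (preds == targets).sum().item()
            total += targets.size(0)
    print('Test accuracy: {:.4f}'.format(correct / total))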
Example #2
    def on_epoch_end(self, epoch, logs=None):

        average_precisions = evaluate(
            self.model_to_save,
            self.valid_generator,
            net_h=self.config["model"]["min_input_size"],
            net_w=self.config["model"]["min_input_size"])
        log_message = ""
        log_message += "epoch : " + str(epoch + 1) + "\n"
        # print the score
        for label, average_precision in average_precisions.items():
            log_message += self.config["model"]["labels"][
                label] + ': {:.4f}'.format(average_precision) + "\n"
            print(self.config["model"]["labels"][label] +
                  ': {:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
        log_message += 'mAP: {:.4f}'.format(
            sum(average_precisions.values()) /
            len(average_precisions)) + "\n\n"
        log_message += "==============================================\n\n"

        with open(os.path.join("./", self.train_mAP, self.log_file), "a+") as f:
            f.write(log_message)
        super(My_Checkpoint, self).on_epoch_end(epoch, logs)
Example #3
def fit_model():
    n_clusters = K
    print('')
    print('=' * 50)
    print("Let's start with {} clusters!".format(n_clusters))
    print('=' * 50)
    print('')

    train_test_clu, train_test_reg, df_weather = read_dataset()

    clusterer = create_clusterer(n_clusters)
    df_clustered_list, sequence, label_df_test = clusterer.fit(train_test_clu)

    classifier = create_classifier()
    best_classifier, test_clf = classifier.fit(train_test_clu, sequence,
                                               df_weather)

    regressor = create_regressor()
    best_model_set = regressor.fit(df_clustered_list, train_test_reg)

    pred_for_plot, true_for_plot = evaluate(label_df_test, train_test_reg,
                                            test_clf, best_classifier,
                                            best_model_set, sequence)

    return
Example #4
    def on_epoch_end(self, epoch, logs=None):
        average_precisions = evaluate(self.infer_model, self.valid_generator)
        print(f'Epoch {epoch} Validation Metrics:')
        print('     mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
        for label, average_precision in average_precisions.items():
            print('         ' + self.labels[label] +
                  ': {:.4f}'.format(average_precision))
Example #5
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    print(config['valid']['valid_annot_folder'])

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])

    labels = labels.keys() if len(
        config['model']['labels']) == 0 else config['model']['labels']
    labels = sorted(labels)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=0,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    infer_model = load_model(config['train']['saved_weights_name'])

    # the thresholds and network size passed below are not defined anywhere in
    # this snippet; the values here are assumed defaults
    iou_threshold, obj_thresh, nms_thresh = 0.5, 0.5, 0.45
    net_h, net_w = 416, 416

    # compute mAP for all the classes
    average_precisions = evaluate(infer_model,
                                  valid_generator,
                                  iou_threshold=iou_threshold,
                                  obj_thresh=obj_thresh,
                                  nms_thresh=nms_thresh,
                                  net_h=net_h,
                                  net_w=net_w)
    # print("recall : {}".format(recall))
    # print("precision : {}".format(precision))
    # plt.plot(recall, precision)
    # plt.show()

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
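
The two print blocks above (per-class AP, then the mean over classes) recur in almost every snippet on this page. Factoring them into a small helper makes the pattern explicit; the helper name is ours, while the average_precisions dict and labels list are exactly as in the snippets:

def print_scores(average_precisions, labels):
    # average_precisions maps class index -> average precision, as returned by evaluate()
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    # mAP is the unweighted mean of the per-class APs
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))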
Example #6
def train_rnn_model(train_loader, val_loader, num_features, num_epochs,
                    use_cuda, path_output):
    """
    Use train and validation loader to train the variable RNN model
    Input: train_loader, val_loader
    Output: trained best model
    """
    device = torch.device(
        "cuda" if torch.cuda.is_available() and use_cuda else "cpu")
    torch.manual_seed(1)
    if device.type == "cuda":
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    model = VariableRNN(num_features)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())

    model.to(device)
    criterion.to(device)

    best_val_acc = 0.0

    train_losses, train_accuracies = [], []
    valid_losses, valid_accuracies = [], []

    for epoch in range(num_epochs):

        train_loss, train_accuracy = train(model, device, train_loader,
                                           criterion, optimizer, epoch)
        valid_loss, valid_accuracy, valid_results = evaluate(
            model, device, val_loader, criterion)

        train_losses.append(train_loss)
        valid_losses.append(valid_loss)

        train_accuracies.append(train_accuracy)
        valid_accuracies.append(valid_accuracy)

        is_best = valid_accuracy > best_val_acc

        if is_best:
            best_val_acc = valid_accuracy
            torch.save(
                model,
                os.path.join(path_output, "VariableRNN.pth"),
                _use_new_zipfile_serialization=False,
            )

    best_model = torch.load(os.path.join(path_output, "VariableRNN.pth"))
    return (
        best_model,
        train_losses,
        valid_losses,
        train_accuracies,
        valid_accuracies,
        valid_results,
    )
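
The snippet above checkpoints the entire module object with torch.save(model, ...). A more portable alternative is to save only the state_dict; a minimal sketch, under the assumption that the same VariableRNN class is importable at load time:

import os
import torch

def save_best(model, path_output, filename="VariableRNN_state.pth"):
    # persist only the parameters, not the pickled module object
    torch.save(model.state_dict(), os.path.join(path_output, filename))

def load_best(num_features, path_output, filename="VariableRNN_state.pth"):
    model = VariableRNN(num_features)  # VariableRNN as in the snippet above
    model.load_state_dict(torch.load(os.path.join(path_output, filename)))
    model.eval()
    return model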
Example #7
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])

    labels = labels.keys() if len(
        config['model']['labels']) == 0 else config['model']['labels']
    labels = sorted(labels)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=0,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    h5_files = []

    for root, dirs, files in os.walk('.'):
        for weights in files:
            if weights.endswith('.h5') and weights.startswith('helmet'):
                h5_files.append(weights)
    print(h5_files)

    for i in h5_files:

        infer_model = load_model(i)

        # compute mAP for all the classes
        average_precisions = evaluate(infer_model, valid_generator)

        # print the score
        for label, average_precision in average_precisions.items():
            print(labels[label] + ': {:.4f}'.format(average_precision))
        print('mAP: {:.4f} of weight {}'.format(
            sum(average_precisions.values()) / len(average_precisions), i))
Example #8
def evaluate_main_(args):
    config_path = args.conf

    if os.path.isfile(config_path):
        yolo_config_file_exit('pass')
    else:
        yolo_config_file_exit('fail')

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])

    labels = labels.keys() if len(config['model']['labels']) == 0 \
        else config['model']['labels']
    labels = sorted(labels)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,
        # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=0,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################
    #os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    infer_model = load_model(config['train']['saved_weights_name'])

    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))

    try:
        yolo_process_exit('pass')
    except RuntimeError:
        yolo_process_exit('fail')
Example #9
    def validate(self):
        """
        One cycle of model validation
        :return:
        """
        self.testmodel.load_state_dict(self.model.state_dict())
        self.testmodel.eval()

        gpu_device = self.config.gpu_device

        if self.config.data_loader == 'Isc':
            nmi, train_recall = evaluate(gpu_device,
                                         self.testmodel,
                                         self.data_loader.train_loader,
                                         self.config.train_classes,
                                         name='tra_similar.jpg')
            nmi, recall = QG_evaluate(gpu_device,
                                      self.testmodel,
                                      self.data_loader.query_loader,
                                      self.data_loader.gallery_loader,
                                      self.config.test_classes,
                                      name='tes_similar.jpg')
        else:
            nmi, train_recall = evaluate(gpu_device,
                                         self.testmodel,
                                         self.data_loader.valid_loader,
                                         self.config.train_classes,
                                         name='tra_similar.jpg')
            nmi, recall = evaluate(gpu_device,
                                   self.testmodel,
                                   self.data_loader.test_loader,
                                   self.config.test_classes,
                                   name='tes_similar.jpg')
        self.logger.info("**Evaluating...**")
        self.logger.info("NMI: {:.3f}".format(nmi * 100))
        ks = [1, 2, 4, 8] if nmi != 0 else [1, 10, 20, 30]
        for i, k in enumerate(ks):
            self.logger.info("R@{} : {:.3f}".format(k, 100 * recall[i]))
        return train_recall[0], recall[0]
Example #10
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])

    labels = labels.keys() if len(
        config['model']['labels']) == 0 else config['model']['labels']
    labels = sorted(labels)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=0,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    infer_model = load_model(config['train']['saved_weights_name'])

    # compute mAP for all the classes
    average_precisions, recall, precision = evaluate(infer_model,
                                                     valid_generator)

    # print the score
    for (c, ap), (_, prec), (_, call) in zip(average_precisions.items(),
                                             precision.items(),
                                             recall.items()):
        print(
            "+ Class {c} - AP: {ap}, precision: {prec}, recall: {call}".format(
                c=c, ap=ap, prec=prec, call=call))
    mean_ap = np.mean(list(average_precisions.values()))
    mprec = np.mean(list(precision.values()))
    mrecall = np.mean(list(recall.values()))
    print("mAP: {map}, mprec: {mprec}, mrecall: {mrecall}".format(
        map=mean_ap, mprec=mprec, mrecall=mrecall))
Example #11
def main(argv):
    C0 = 0
    C1 = 8

    # Read args from command line
    sampleSize = utils.parseArgs(argv)

    # Load the train and test sets from MNIST
    print("Loading datasets from MNIST...")
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Apply preprocessing to the training and test sets
    print("Preprocessing training set...")
    x_train, y_train = utils.preprocess(x_train, y_train, C0, C1)
    print("Preprocessing testing set...")
    x_test, y_test = utils.preprocess(x_test, y_test, C0, C1)

    # Apply feature selection to training set
    print("Applying feature selection...")
    x_train, x_test = utils.featureSelection(x_train, x_test)

    # Split training set by class
    x0_train = [x for x, y in zip(x_train, y_train) if y == 0]
    x1_train = [x for x, y in zip(x_train, y_train) if y == 1]

    # Take random sample of each class of training set
    print("Sampling {}% of training set".format(sampleSize * 100))
    x0_train_sample = random.sample(x0_train, int(len(x0_train) * sampleSize))
    x1_train_sample = random.sample(x1_train, int(len(x1_train) * sampleSize))

    # Use Dr Arodz's code to get MAP estimate
    print(
        "Running Dr Arodz's code to obtain MAP estimates of means and covariance"
    )
    m0, m1, cov = Arodz(x0_train_sample, x1_train_sample)

    # Predict labels for test set
    print("Testing model...")
    labels = predict(m0, m1, cov, x_test)

    # Evaluate label accuracy
    utils.evaluate(labels, y_test)
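
The predict helper used above is not shown. Given that Arodz returns two class means and one shared covariance, a natural reading is a Gaussian classifier with shared covariance and equal priors; a minimal sketch, with every name and detail assumed:

import numpy as np

def predict(m0, m1, cov, x_test):
    # assign each sample to the class whose mean is closer in Mahalanobis distance
    cov_inv = np.linalg.inv(cov)
    labels = []
    for x in np.asarray(x_test):
        d0 = (x - m0) @ cov_inv @ (x - m0)
        d1 = (x - m1) @ cov_inv @ (x - m1)
        labels.append(0 if d0 <= d1 else 1)
    return labels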
Example #12
def main(_):
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"
    
    # --------- load classifier ------- #
    cascade = cv2.CascadeClassifier(FLAGS.cascade_xml)
    model, x, keep_prob = get_nn_classifier()
    
    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')
    
    img = utils.getImage(image_path)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    
    delta = [-2, -1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 0.99, 0.995, 0.999, 0.9995, 0.9999]
    
    start = time.time()
    candidates = cascade.detectMultiScale(img, scaleFactor=FLAGS.scaleFactor, minNeighbors=FLAGS.minNeighbors, maxSize=(FLAGS.max_window_size,FLAGS.max_window_size))
    detected = nn_classification(candidates, img, model, x, keep_prob, delta)
    elapsed = (time.time() - start)  
    
    print('detection time: %d' % elapsed)

    # ------------- evaluation --------------#
        
    ground_truth_data = utils.get_ground_truth_data(csv_path)
    
    for j in range(len(delta)):
        detected[j] = [Rect(x, y, w, h) for (x,y,w,h) in detected[j]]
        tp, fn, fp = utils.evaluate(ground_truth_data, detected[j])
        
        # ----------------output ----------------#
        # image output
        """
        img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        for (x,y,w,h) in detected[j]:
            cv2.rectangle(img_out, (x-w/2,y-h/2),(x+w/2,y+h/2), [0,255,0], 3)
                    
        for c in ground_truth_data:
            cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)
                
        output_file = "out" + '_' + str(datetime.datetime.now())
        cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
        """
        # csv output
        with open(FLAGS.output_dir + FLAGS.out + '.csv', 'a', newline='') as file:
            writer = csv.writer(file, delimiter=',')
            writer.writerow([FLAGS.test, str(elapsed), str(len(ground_truth_data)), delta[j], FLAGS.minNeighbors, FLAGS.scaleFactor,
                             str(len(detected[j])), str(tp), str(fp), str(fn)])
Example #13
def main(argv):
    C0 = 0
    C1 = 8

    # Read args from command line
    sampleSize = utils.parseArgs(argv)

    # Load the train and test sets from MNIST
    print("Loading datasets from MNIST...")
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Apply preprocessing to the training and test sets
    print("Preprocessing training set...")
    x_train, y_train = utils.preprocess(x_train, y_train, C0, C1)
    print("Preprocessing testing set...")
    x_test, y_test = utils.preprocess(x_test, y_test, C0, C1)
    
    # Apply feature selection to training set
    # print("Applying feature selection...")
    # x_train, x_test = utils.featureSelection(x_train, x_test)

    # Sample training set
    sampleIndices = set(
        random.sample(range(len(x_train)), int(len(x_train) * sampleSize)))
    x_train_sample = [x for i, x in enumerate(x_train) if i in sampleIndices]
    y_train_sample = [y for i, y in enumerate(y_train) if i in sampleIndices]

    # Obtain MAP estimates
    print("Running Dr Arodz's code to obtain MAP estimates of w and b")
    w, b = Arodz(x_train_sample, y_train_sample)

    # Predict labels for test set
    print("Testing model...")
    labels = predict(w, b, x_test)

    # Evaluate label accuracy
    utils.evaluate(labels, y_test)
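
Here the MAP estimates are a weight vector w and a bias b, so predict presumably applies a logistic-regression style linear decision rule. A minimal sketch, with all details assumed:

import numpy as np

def predict(w, b, x_test):
    # a positive score corresponds to sigmoid(score) > 0.5, i.e. class 1
    scores = np.asarray(x_test) @ np.asarray(w) + b
    return (scores > 0).astype(int)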
Example #14
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:    
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################  
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'], 
        config['valid']['valid_image_folder'], 
        config['valid']['cache_name'],
        config['model']['labels']
    )

    labels = labels.keys() if len(config['model']['labels']) == 0 else config['model']['labels']
    labels = sorted(labels)
   
    valid_generator = BatchGenerator(
        instances           = valid_ints, 
        anchors             = config['model']['anchors'],   
        labels              = labels,        
        downsample          = 32, # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image   = 0,
        batch_size          = config['train']['batch_size'],
        min_net_size        = config['model']['min_input_size'],
        max_net_size        = config['model']['max_input_size'],   
        shuffle             = True, 
        jitter              = 0.0, 
        norm                = normalize
    )

    ###############################
    #   Load the model and do evaluation
    ###############################
    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    infer_model = load_model(config['train']['saved_weights_name'])

    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))           
Example #15
def main():
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"
    
    # ------------ load classifier ---------- #
    cascade = cv2.CascadeClassifier(FLAGS.cascade_xml)
    
    # -------------- open image --------------#
    img = utils.getImage(image_path)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    
    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')
    
    start = time.time()
    detected = cascade.detectMultiScale(img, scaleFactor=FLAGS.scaleFactor, minNeighbors=FLAGS.minNeighbors, maxSize=(FLAGS.max_window_size, FLAGS.max_window_size))
    elapsed = (time.time() - start)
    print('detection time: %d' % elapsed)
    
    # ------------- evaluation --------------#
    detected = [Rect(x, y, w, h) for (x,y,w,h) in detected]
    ground_truth_data = utils.get_ground_truth_data(csv_path)
    
    tp, fn, fp = utils.evaluate(ground_truth_data, detected)
        
    # ----------------output ----------------#
    # image output
    """
    img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    
    for c in ground_truth_data:
        cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)
        
    for r in detected:
        cv2.rectangle(img_out, (r.x, r.y), (r.x2(), r.y2()), [0,255,0], 2)
        
    output_file = "out" + '_' + str(datetime.datetime.now())
    cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
    """
    # csv output
    with open(FLAGS.output_dir + 'results.csv', 'a', newline='') as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerow([FLAGS.test, str(elapsed), str(len(ground_truth_data)), str(FLAGS.scaleFactor),
                         str(FLAGS.minNeighbors), str(len(detected)), str(tp), str(fp), str(fn)])
Example #16
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['model']['labels'])

    labels = sorted(labels.keys())

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=config['model']['max_box_per_image'],
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################
    infer_model = load_model(config['train']['saved_weights_name'])

    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Example #17
def test(args):
    config_path = 'config.json'

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    data_dir = args.data_dir
    annotation_dir = args.annotation_dir
    batch_size = args.batch_size

    images_path = data_dir
    annotation_path = annotation_dir
    labels = config['model']['labels']

    # images_path = config['train']['train_image_folder']
    # annotation_path = config['train']['train_annot_folder']
    # batch_size=config['train']['batch_size']

    test_list, label_list = parse_voc(images_path, annotation_path, labels)
    label_list = labels

    max_box_per_image = max([len(images['object']) for images in test_list])

    test_generator = Dataloader(train_list=test_list,
                                label_list=label_list,
                                anchors=config['model']['anchors'],
                                max_box_per_image=max_box_per_image,
                                batch_size=batch_size)

    with open('logs/models/model_architecture.json', 'r') as json_file:
        infer_model_json = json_file.read()
    test_model = model_from_json(infer_model_json)
    test_model.load_weights('./logs/weights/voc.h5')

    average_precisions = evaluate(test_model, test_generator)
    print('\n')
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Example #18
    hparams = {  # assumed opening brace; the keys before 'npratio' are not shown in the source
        'npratio': 4,
        'dropout_p': 0.2,
        'query_dim': 200,
        'embedding_dim': 300,
        'filter_num': 400,
        'value_dim': 16,
        'head_num': 16,
        'epochs': 5,
        'metrics': 'group_auc,ndcg@5,ndcg@10,mean_mrr',
        'attrs': ['title'],
    }
    hparams = load_hparams(hparams)

    device = torch.device(hparams['device'])

    vocab, loader_train, loader_test, loader_validate = prepare(hparams,
                                                                validate=True)
    gcaModel = GCAModel(vocab=vocab, hparams=hparams).to(device)

    if hparams['mode'] == 'test':
        gcaModel.load_state_dict(torch.load(hparams['save_path']))
        print("testing...")
        evaluate(gcaModel, hparams, loader_test)

    elif hparams['mode'] == 'train':
        train(gcaModel,
              hparams,
              loader_train,
              loader_test,
              loader_validate,
              tb=True)
Example #19
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Create the validation generator
    ###############################
    valid_ints, labels = parse_voc_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])

    labels = labels.keys() if len(
        config['model']['labels']) == 0 else config['model']['labels']
    labels = sorted(labels)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=0,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Load the model and do evaluation
    ###############################

    ###############################
    #   Create the model
    ##############################

    train_model, infer_model = create_yolov3_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=0,
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=0,
        ignore_thresh=config['train']['ignore_thresh'],
        grid_scales=config['train']['grid_scales'],
        obj_scale=config['train']['obj_scale'],
        noobj_scale=config['train']['noobj_scale'],
        xywh_scale=config['train']['xywh_scale'],
        class_scale=config['train']['class_scale'],
    )

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']

    saved_weights_name = config['train']['saved_weights_name']
    lr = config['train']['learning_rate']

    infer_model.load_weights(saved_weights_name)
    optimizer = Adam(lr=lr, clipnorm=0.001)
    infer_model.compile(loss=dummy_loss, optimizer=optimizer)

    infer_model.summary()
    #infer_model = load_model(config['train']['saved_weights_name'])

    # compute mAP for all the classes
    recall, precision, average_precisions = evaluate(infer_model,
                                                     valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))

    return recall, precision, average_precisions
Example #20
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    train_ints, valid_ints, labels, max_box_per_image = create_training_instances(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'], config['train']['cache_name'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])
    print('\nTraining on: \t' + str(labels) + '\n')

    ###############################
    #   Create the generators
    ###############################
    train_generator = BatchGenerator(
        instances=train_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.3,
        norm=normalize)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Create the model
    ###############################
    if os.path.exists(config['train']['saved_weights_name']):
        config['train']['warmup_epochs'] = 0
    warmup_batches = config['train']['warmup_epochs'] * (
        config['train']['train_times'] * len(train_generator))

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))

    train_model, infer_model = create_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=max_box_per_image,
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=warmup_batches,
        ignore_thresh=config['train']['ignore_thresh'],
        multi_gpu=multi_gpu,
        saved_weights_name=config['train']['saved_weights_name'],
        lr=config['train']['learning_rate'],
        grid_scales=config['train']['grid_scales'],
        obj_scale=config['train']['obj_scale'],
        noobj_scale=config['train']['noobj_scale'],
        xywh_scale=config['train']['xywh_scale'],
        class_scale=config['train']['class_scale'],
    )

    ###############################
    #   Kick off the training
    ###############################
    callbacks = create_callbacks(config['train']['saved_weights_name'],
                                 config['train']['tensorboard_dir'],
                                 infer_model)

    history = train_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_generator) *
        config['train']['train_times'],  #(train_images/4)*train_times
        epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        callbacks=callbacks,
        workers=4,
        max_queue_size=8,
        validation_data=valid_generator,
        validation_steps=len(valid_generator) * config['valid']['valid_times'])

    # Prepare folder to save in
    now = datetime.now()
    time_str = now.strftime("%Y%m%d-%H:%M")
    cwd = os.getcwd()
    save_path = os.path.join(cwd, time_str)
    print('Data saved in: ' + save_path)
    makedirs(save_path)

    # Copy and save config file
    save_config_path = os.path.join(save_path, 'config.json')
    shutil.copy2(config_path, save_config_path)

    # plotting train data and saving mat file with data
    plot_matlab(history, config['data']['plot_png'],
                config['data']['save_mat'], save_path)

    # Save the loaded models to .TXT
    save_template_path = os.path.join(save_path, 'train_model_params.txt')
    with open(save_template_path, 'w') as fh:
        # Pass the file handle in as a lambda function to make it callable
        train_model.summary(print_fn=lambda x: fh.write(x + '\n'))

    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Run the evaluation
    ###############################
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    print('========== VALIDATION ==========')
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Example #21
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    train_ints, valid_ints, labels = create_training_instances(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'], config['train']['cache_name'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])

    ###############################
    #   Create the generators
    ###############################
    train_generator = BatchGenerator(
        instances=train_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=config['model']['max_box_per_image'],
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.3,
        norm=normalize)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=config['model']['max_box_per_image'],
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Create the model
    ###############################
    if os.path.exists(config['train']['saved_weights_name']):
        warmup_batches = 0  # no need warmup if the pretrained weight exists
    else:
        warmup_batches  = config['train']['warmup_epochs'] * (config['train']['train_times']*len(train_generator) + \
                                                              config['valid']['valid_times']*len(valid_generator))

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))

    train_model, infer_model = create_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=config['model']['max_box_per_image'],
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=warmup_batches,
        ignore_thresh=config['train']['ignore_thresh'],
        multi_gpu=multi_gpu,
        saved_weights_name=config['train']['saved_weights_name'])

    ###############################
    #   Kick off the training
    ###############################
    optimizer = Adam(lr=config['train']['learning_rate'],
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-08,
                     decay=0.0)
    train_model.compile(loss=dummy_loss, optimizer=optimizer)

    callbacks = create_callbacks(config['train']['saved_weights_name'])
    try:
        train_model.fit_generator(generator=train_generator,
                                  steps_per_epoch=len(train_generator) *
                                  config['train']['train_times'],
                                  epochs=config['train']['nb_epochs'] +
                                  config['train']['warmup_epochs'],
                                  verbose=2 if config['train']['debug'] else 1,
                                  validation_data=valid_generator,
                                  validation_steps=len(valid_generator) *
                                  config['valid']['valid_times'],
                                  callbacks=callbacks,
                                  workers=4,
                                  max_queue_size=8)
    except KeyboardInterrupt:
        # allow stopping training early by hand; the best weights saved so far are reloaded below
        pass

    # load the best weight before early stop
    train_model.load_weights(config['train']['saved_weights_name'])

    if multi_gpu > 1:
        # fix the saved model structure when multi_gpu > 1
        train_model.get_layer("model_1").save(
            config['train']['saved_weights_name'])

        # load the best weight to the infer_model
        infer_model.load_weights(config['train']['saved_weights_name'])

    # save the weight with the model structure of infer_model
    infer_model.save(config['train']['saved_weights_name'])

    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Run the evaluation
    ###############################
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Example #22
custom_objects = {  # assumed opening; entries before 'depth_loss_function' are not shown in the source
    'depth_loss_function': depth_loss_function
}

# Load model into GPU / CPU
print('Loading model...')
model = load_model(args.model, custom_objects=custom_objects, compile=False)

# Load test data
print('Loading test data...', end='')
import numpy as np
from data import extract_zip
data = extract_zip('nyu_test.zip')
from io import BytesIO
rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
depth = np.load(BytesIO(data['eigen_test_depth.npy']))
crop = np.load(BytesIO(data['eigen_test_crop.npy']))
print('Test data loaded.\n')

start = time.time()
print('Testing...')

e = evaluate(model, rgb, depth, crop, batch_size=6)

print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
    'a1', 'a2', 'a3', 'rel', 'rms', 'log_10'))
print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(
    e[0], e[1], e[2], e[3], e[4], e[5]))

end = time.time()
print('\nTest time', end - start, 's')
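
The six values unpacked from evaluate above match the standard monocular-depth evaluation protocol (an assumption based on the metric names): threshold accuracies a1-a3, absolute relative error, RMSE, and mean log10 error. A sketch of how such values are typically computed from ground-truth and predicted depth maps:

import numpy as np

def depth_errors(gt, pred):
    # gt, pred: positive depth arrays of identical shape
    thresh = np.maximum(gt / pred, pred / gt)
    a1 = (thresh < 1.25).mean()
    a2 = (thresh < 1.25 ** 2).mean()
    a3 = (thresh < 1.25 ** 3).mean()
    rel = np.mean(np.abs(gt - pred) / gt)
    rms = np.sqrt(np.mean((gt - pred) ** 2))
    log_10 = np.mean(np.abs(np.log10(gt) - np.log10(pred)))
    return a1, a2, a3, rel, rms, log_10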
Example #23
def main():
    '''
    Training and evaluation of the model.
    '''
    print('Training starts...')
    for epoch in range(num_of_epoch):
        print('\nEpoch', epoch + 1)
        # log the start time of the epoch
        start = time.time()
        # set the models in training mode
        clstm.train()
        policy_s.train()
        policy_n.train()
        policy_c.train()
        # reset the count of reread_or_skim_times
        reread_or_skim_times = 0
        policy_loss_sum = []
        encoder_loss_sum = []
        baseline_value_batch = []
        for index, train in enumerate(train_iterator):
            label = train.label.to(torch.long)  # cross entropy loss requires long targets
            text = train.text.view(CHUNCK_SIZE, BATCH_SIZE,
                                   CHUNCK_SIZE)  # transform 1*400 to 20*1*20
            curr_step = 0  # the position of the current chunk
            h_0 = torch.zeros([1, 1, 128]).to(device)  # run on GPU
            c_0 = torch.zeros([1, 1, 128]).to(device)
            count = 0  # maximum skim/reread time: 5
            baseline_value_ep = []
            saved_log_probs = []  # for the use of policy gradient update
            # collect the computational costs for every time step
            cost_ep = []
            while curr_step < CHUNCK_SIZE and count < 5:
                # Loop until the text is classified, curr_step reaches 20, or count reaches the maximum of 5.
                # update count
                count += 1
                # pass the input through cnn-lstm and policy s
                text_input = text[curr_step]  # text_input 1*20
                ht, ct = clstm(text_input, h_0, c_0)  # 1 * 128
                # separate the value which is the input of value net
                ht_ = ht.clone().detach().requires_grad_(True)
                # compute a baseline value for the value network
                bi = value_net(ht_)
                # 1 * 1 * 128, next input of lstm
                h_0 = ht.unsqueeze(0)
                c_0 = ct
                # draw a stop decision
                stop_decision, log_prob_s = sample_policy_s(ht, policy_s)
                stop_decision = stop_decision.item()
                if stop_decision == 1:  # classify
                    break
                else:
                    reread_or_skim_times += 1
                    # draw an action (reread or skip)
                    step, log_prob_n = sample_policy_n(ht, policy_n)
                    curr_step += int(step)  # reread or skip
                    if curr_step < CHUNCK_SIZE and count < 5:
                        # If the code can still execute the next loop, it is not the last time step.
                        cost_ep.append(clstm_cost + s_cost + n_cost)
                        # add the baseline value
                        baseline_value_ep.append(bi)
                        # add the log prob for the current actions
                        saved_log_probs.append(log_prob_s + log_prob_n)
            # draw a predicted label
            output_c = policy_c(ht)
            # cross entropy loss input shape: input(N, C), target(N)
            loss = criterion(output_c, label)  # positive value
            # draw a predicted label
            pred_label, log_prob_c = sample_policy_c(output_c)
            if stop_decision == 1:
                # add the cost of the last time step
                cost_ep.append(clstm_cost + s_cost + c_cost)
                saved_log_probs.append(log_prob_s + log_prob_c)
            else:
                # add the cost of the last time step
                cost_ep.append(clstm_cost + s_cost + c_cost + n_cost)
                # At this point, the probability of drawing a stop decision is 1,
                # so its log probability is zero and can be ignored in the sum.
                saved_log_probs.append(log_prob_c.unsqueeze(0))
            # add the baseline value
            baseline_value_ep.append(bi)
            # add the cross entropy loss
            encoder_loss_sum.append(loss)
            # compute the policy losses and value losses for the current episode
            policy_loss_ep, value_losses = compute_policy_value_losses(
                cost_ep, loss, saved_log_probs, baseline_value_ep, alpha,
                gamma)
            policy_loss_sum.append(torch.cat(policy_loss_ep).sum())
            baseline_value_batch.append(torch.cat(value_losses).sum())
            # update gradients
            if (index + 1) % batch_sz == 0:  # take the average of samples, backprop
                finish_episode(policy_loss_sum, encoder_loss_sum,
                               baseline_value_batch)
                del policy_loss_sum[:], encoder_loss_sum[:], baseline_value_batch[:]

            if (index + 1) % 2000 == 0:
                print(f'\n current episode: {index + 1}')
                # log the current position of the text which the agent has gone through
                print('curr_step: ', curr_step)
                # log the sum of the rereading and skimming times
                print(f'current reread_or_skim_times: {reread_or_skim_times}')

        print('Epoch time elapsed: %.2f s' % (time.time() - start))
        print('reread_or_skim_times in this epoch:', reread_or_skim_times)
        count_all, count_correct = evaluate(clstm, policy_s, policy_n,
                                            policy_c, valid_iterator)
        print('Epoch: %s, Accuracy on the validation set: %.2f' %
              (epoch + 1, count_correct / count_all))
        count_all, count_correct = evaluate(clstm, policy_s, policy_n,
                                            policy_c, train_iterator)
        print('Epoch: %s, Accuracy on the training set: %.2f' %
              (epoch + 1, count_correct / count_all))

    print('Compute the accuracy on the testing set...')
    count_all, count_correct = evaluate(clstm, policy_s, policy_n, policy_c,
                                        test_iterator)
    print('Accuracy on the testing set: %.2f' % (count_correct / count_all))
Example #24
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:    
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations 
    ###############################
    train_ints, valid_ints, labels, max_box_per_image = create_training_instances(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['train']['cache_name'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'],
        config['valid']['cache_name'],
        config['model']['labels']
    )
    print('\nTraining on: \t' + str(labels) + '\n')

    ###############################
    #   Create the generators 
    ###############################    
    train_generator = BatchGenerator(
        instances           = train_ints, 
        anchors             = config['model']['anchors'],   
        labels              = labels,        
        downsample          = 32, # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image   = max_box_per_image,
        batch_size          = config['train']['batch_size'],
        min_net_size        = config['model']['min_input_size'],
        max_net_size        = config['model']['max_input_size'],   
        shuffle             = True, 
        jitter              = 0.3, 
        norm                = normalize
    )
    
    valid_generator = BatchGenerator(
        instances           = valid_ints, 
        anchors             = config['model']['anchors'],   
        labels              = labels,        
        downsample          = 32, # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image   = max_box_per_image,
        batch_size          = config['train']['batch_size'],
        min_net_size        = config['model']['min_input_size'],
        max_net_size        = config['model']['max_input_size'],   
        shuffle             = True, 
        jitter              = 0.0, 
        norm                = normalize
    )

    ###############################
    #   Create the model 
    ###############################
    if os.path.exists(config['train']['saved_weights_name']): 
        config['train']['warmup_epochs'] = 0
    warmup_batches = config['train']['warmup_epochs'] * (config['train']['train_times']*len(train_generator))   

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))

    train_model, infer_model = create_model(
        nb_class            = len(labels), 
        anchors             = config['model']['anchors'], 
        max_box_per_image   = max_box_per_image, 
        max_grid            = [config['model']['max_input_size'], config['model']['max_input_size']], 
        batch_size          = config['train']['batch_size'], 
        warmup_batches      = warmup_batches,
        ignore_thresh       = config['train']['ignore_thresh'],
        multi_gpu           = multi_gpu,
        saved_weights_name  = config['train']['saved_weights_name'],
        lr                  = config['train']['learning_rate'],
        grid_scales         = config['train']['grid_scales'],
        obj_scale           = config['train']['obj_scale'],
        noobj_scale         = config['train']['noobj_scale'],
        xywh_scale          = config['train']['xywh_scale'],
        class_scale         = config['train']['class_scale'],
    )

    ###############################
    #   Kick off the training
    ###############################
    callbacks = create_callbacks(config['train']['saved_weights_name'], config['train']['tensorboard_dir'], infer_model)

    train_model.fit_generator(
        generator        = train_generator, 
        steps_per_epoch  = len(train_generator) * config['train']['train_times'], 
        epochs           = config['train']['nb_epochs'] + config['train']['warmup_epochs'], 
        verbose          = 2 if config['train']['debug'] else 1,
        callbacks        = callbacks, 
        workers          = 4,
        max_queue_size   = 8
    )

    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Run the evaluation
    ###############################   
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))           
Example #25
        def on_epoch_end(self, epoch, logs=None):
            if test_set is not None:
                # Samples using current model
                import matplotlib.pyplot as plt
                from skimage.transform import resize
                plasma = plt.get_cmap('plasma')

                minDepth, maxDepth = 10, 1000

                train_samples = []
                test_samples = []

                for i in range(self.num_samples):
                    x_train, y_train = train_generator.__getitem__(
                        self.train_idx[i], False)
                    x_test, y_test = test_generator[self.test_idx[i]]

                    x_train, y_train = x_train[0], np.clip(
                        DepthNorm(y_train[0], maxDepth=1000), minDepth,
                        maxDepth) / maxDepth
                    x_test, y_test = x_test[0], np.clip(
                        DepthNorm(y_test[0], maxDepth=1000), minDepth,
                        maxDepth) / maxDepth

                    h, w = y_train.shape[0], y_train.shape[1]

                    rgb_train = resize(x_train, (h, w),
                                       preserve_range=True,
                                       mode='reflect',
                                       anti_aliasing=True)
                    rgb_test = resize(x_test, (h, w),
                                      preserve_range=True,
                                      mode='reflect',
                                      anti_aliasing=True)

                    gt_train = plasma(y_train[:, :, 0])[:, :, :3]
                    gt_test = plasma(y_test[:, :, 0])[:, :, :3]

                    predict_train = plasma(
                        predict(model,
                                x_train,
                                minDepth=minDepth,
                                maxDepth=maxDepth)[0, :, :, 0])[:, :, :3]
                    predict_test = plasma(
                        predict(model,
                                x_test,
                                minDepth=minDepth,
                                maxDepth=maxDepth)[0, :, :, 0])[:, :, :3]

                    train_samples.append(
                        np.vstack([rgb_train, gt_train, predict_train]))
                    test_samples.append(
                        np.vstack([rgb_test, gt_test, predict_test]))

                self.writer.add_summary(
                    tf.Summary(value=[
                        tf.Summary.Value(tag='Train',
                                         image=make_image(
                                             255 * np.hstack(train_samples)))
                    ]), epoch)
                self.writer.add_summary(
                    tf.Summary(value=[
                        tf.Summary.Value(tag='Test',
                                         image=make_image(
                                             255 * np.hstack(test_samples)))
                    ]), epoch)

                # Metrics
                e = evaluate(model,
                             test_set['rgb'],
                             test_set['depth'],
                             test_set['crop'],
                             batch_size=6,
                             verbose=True)
                logs.update({'rel': e[3]})
                logs.update({'rms': e[4]})
                logs.update({'log10': e[5]})

            super().on_epoch_end(epoch, logs)
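
The callback above depends on a DepthNorm helper defined elsewhere in the repository. Its definition is not shown here; in DenseDepth-style code it is commonly the inverse-depth scaling sketched below (an assumption, not this snippet's own code):

# Assumed definition of DepthNorm (inverse-depth normalization); verify
# against the actual repository before relying on it.
def DepthNorm(x, maxDepth=1000.0):
    return maxDepth / x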
Beispiel #26
0
    hparams = {
        'name': 'knrm',
        'batch_size': 100,
        'title_size': 20,
        'his_size': 50,
        'npratio': 4,
        'embedding_dim': 300,
        'kernel_num': 11,
        'metrics': 'group_auc,ndcg@5,ndcg@10,mean_mrr',
        'attrs': ['title'],
    }

    hparams = load_hparams(hparams)
    device = torch.device(hparams['device'])

    vocab, loader_train, loader_test, loader_validate = prepare(hparams,
                                                                validate=True)
    knrmModel = KNRMModel(vocab=vocab, hparams=hparams).to(device)

    if hparams['mode'] == 'test':
        knrmModel.load_state_dict(torch.load(hparams['save_path']))
        print("testing...")
        evaluate(knrmModel, hparams, loader_test)

    elif hparams['mode'] == 'train':
        train(knrmModel,
              hparams,
              loader_train,
              loader_test,
              loader_validate,
              tb=True)
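
A small aside on the hparams above: 'metrics' is a comma-separated spec, so the evaluation code presumably splits it into individual metric names before computing them. A trivial sketch of that step:

# Turning the comma-separated metrics spec into a list of metric names.
metrics = 'group_auc,ndcg@5,ndcg@10,mean_mrr'.split(',')
# -> ['group_auc', 'ndcg@5', 'ndcg@10', 'mean_mrr']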
Beispiel #27
0
def _main():
    config_path = './local_config.json'
    LABELS = read_category()

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    ''' Parse annotations '''
    config['model']['labels'] = LABELS
    train_ints, valid_ints, labels, max_box_per_image = create_train_valid_set(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['model']['labels'])
    ''' Create generators '''
    # check if images are normal after BatchGenerator
    batches = BatchGenerator(instances=np.append(train_ints, valid_ints),
                             anchors=config['model']['anchors'],
                             labels=labels,
                             downsample=32,
                             max_box_per_image=max_box_per_image,
                             batch_size=config['train']['batch_size'],
                             shuffle=False,
                             jitter=False)
    img = batches[0][0][0][5]
    # plt.imshow(img.astype('uint8'))

    train_generator = BatchGenerator(
        instances=train_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        # min_net_size=config['model']['min_input_size'],
        # max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=True,  # add 10% noise for each image for training
        norm=normalize)

    img = train_generator[0][0][0][5]
    # plt.imshow(img.astype('float'))

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        # min_net_size=config['model']['min_input_size'],
        # max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=False,
        norm=normalize)
    ''' Create the model '''
    if os.path.exists(config['train']['saved_weights_name']):
        config['train']['warmup_epochs'] = 0
    warmup_batches = config['train']['warmup_epochs'] * (
        config['train']['train_times'] * len(train_generator))

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))

    train_model, infer_model = create_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=max_box_per_image,
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=warmup_batches,
        ignore_thresh=config['train']['ignore_thresh'],
        multi_gpu=multi_gpu,
        saved_weights_name=config['train']['saved_weights_name'],
        lr=config['train']['learning_rate'],
        grid_scales=config['train']['grid_scales'],
        obj_scale=config['train']['obj_scale'],
        noobj_scale=config['train']['noobj_scale'],
        xywh_scale=config['train']['xywh_scale'],
        class_scale=config['train']['class_scale'],
    )

    print('\ntrain_model: \n')
    train_model.summary()  # summary() prints itself; wrapping it in print() would also emit 'None'
    print('\ninfer_model: \n')
    infer_model.summary()
    ''' Kick off the training '''
    callbacks = create_callbacks(config['train']['saved_weights_name'],
                                 config['train']['tensorboard_dir'],
                                 infer_model)
    train_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_generator),  # * config['train']['train_times']
        epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        callbacks=callbacks,
        workers=16,
        max_queue_size=8)

    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])
    ''' Evaluate the result '''
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
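
The warmup_batches computation above is easy to misread; a worked example with hypothetical config values makes the arithmetic concrete:

# Hypothetical values, for illustration only.
warmup_epochs = 3        # config['train']['warmup_epochs']
train_times = 2          # config['train']['train_times']
steps_per_epoch = 500    # len(train_generator)
warmup_batches = warmup_epochs * (train_times * steps_per_epoch)
print(warmup_batches)    # -> 3000 batches with warmup behavior active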
Beispiel #28
0
        callbacks        = callbacks, 
        workers          = 4,
        max_queue_size   = 8
    )
    print("[INFO] Saving Model...")

    print("[INFO] Start Evalutating Model...")
    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Run the evaluation
    ###############################   
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)
    ap = []

    # print the mAP score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ' average precision(AP): {:.6f}'.format(average_precision['ap']))
        ap.append(average_precision['ap'])
        print(labels[label] + ' recall: {:.6f}'.format(average_precision['recall']))
        print(labels[label] + ' precision: {:.6f}'.format(average_precision['precision']))
    print('[INFO] mAP: {:.6f}'.format(sum(ap) / len(ap)))
    print("[INFO] Completed...")

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description='train and evaluate YOLO_v3 model on any dataset')
    argparser.add_argument('-c', '--conf', help='path to configuration file')   
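
Unlike the earlier examples, the evaluate() used in this snippet returns a dict of per-class metric dicts rather than bare AP floats; the printing loop implies a shape like the following (inferred from usage, with placeholder numbers):

# Inferred return shape of this variant of evaluate(); values are placeholders.
average_precisions = {
    0: {'ap': 0.912345, 'recall': 0.904215, 'precision': 0.876543},
    1: {'ap': 0.788990, 'recall': 0.812345, 'precision': 0.765432},
}
ap = [m['ap'] for m in average_precisions.values()]
print('[INFO] mAP: {:.6f}'.format(sum(ap) / len(ap)))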
Beispiel #29
0
def main(_):
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"
    
    # ---------- create model ----------------#
    x           = tf.placeholder("float", shape=[None, FLAGS.input_size * FLAGS.input_size])
    keep_prob   = tf.placeholder("float")
    global_step = tf.Variable(0, trainable=False, name='global_step')
    model  = nn.create(x, keep_prob)
    
    # ---------- restore model ---------------#
    saver = tf.train.Saver()
    # create the session used by saver.restore and tf.train.global_step below
    sess = tf.Session()
    if tf.train.latest_checkpoint(FLAGS.checkpoint_dir) is not None:
        saver.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
    
    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')
    
    img = utils.getImage(image_path)
    img = cv2.copyMakeBorder(img, FLAGS.max_window_size, FLAGS.max_window_size, FLAGS.max_window_size, FLAGS.max_window_size, cv2.BORDER_REPLICATE) 
    
    start = time.time()
    
    #sliding window detection
    mask = create_mask(model, x, keep_prob, img)
    elapsed = time.time() - start
    print('detection time: %d' % elapsed)

    # ------------- evaluation --------------#
    global_step = tf.train.global_step(sess, global_step)
    
    ground_truth_data = utils.get_ground_truth_data(csv_path)
    ground_truth_data = [(x + FLAGS.max_window_size,y + FLAGS.max_window_size) for (x,y) in ground_truth_data]
        
    for th in [150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250]:        
        detected = mask_to_objects(mask, th)
        tp, fn, fp = utils.evaluate(ground_truth_data, detected)
        
        # ----------------output ----------------#
        # image output
        """
        img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) * 255
        
        for (r,score) in candidates:
            cv2.rectangle(img_out, (r.x,r.y), (r.x2(), r.y2()), [200,200,200], 2)
        
        for r in detected:
            cv2.rectangle(img_out, (r.x,r.y), (r.x2(), r.y2()), [0,255,0], 2)
            
        for c in ground_truth_data:
            cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)
        
        output_file = "out" + '_' + str(global_step) + 'its_' + str(FLAGS.step_size) + 'step_' + str(th) + 'threshold_' + str(datetime.datetime.now())
        cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
        """
        
        # csv output
        with open(FLAGS.output_dir + 'results.csv', 'a', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            writer.writerow([FLAGS.test, str(elapsed), 
                            str(global_step), str(len(ground_truth_data)), str(th),
                            str(len(detected)), str(FLAGS.step_size), str(tp), str(fp), str(fn)])
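
utils.evaluate() above returns (tp, fn, fp) counts by matching detections against ground-truth points, but its implementation is not shown. One plausible scheme is greedy matching by center distance, sketched below (an assumption about the repository's logic, not its actual code):

import math

def evaluate_detections(ground_truth, detections, max_dist=20):
    # Greedy matching sketch: a detection counts as a true positive if its
    # (x, y) center lies within max_dist pixels of a not-yet-matched
    # ground-truth point.
    unmatched = list(ground_truth)
    tp = 0
    for (cx, cy) in detections:
        best = None
        for gt in unmatched:
            d = math.hypot(gt[0] - cx, gt[1] - cy)
            if d <= max_dist and (best is None or d < best[0]):
                best = (d, gt)
        if best is not None:
            unmatched.remove(best[1])
            tp += 1
    fn = len(unmatched)         # ground-truth points never matched
    fp = len(detections) - tp   # detections that matched nothing
    return tp, fn, fp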
Beispiel #30
0
    def eval_map(self):
        return evaluate(self.model, self.data_generator)
Beispiel #31
0
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    train_ints, valid_ints, labels, max_box_per_image = create_training_instances(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'], config['train']['cache_name'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])
    print('\nTraining on: \t' + str(labels) + '\n')

    ###############################
    #   Create the generators
    ###############################
    train_generator = BatchGenerator(
        instances=train_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.3,
        norm=normalize)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Create the model
    ###############################
    if os.path.exists(config['train']['saved_weights_name']):
        config['train']['warmup_epochs'] = 0
    warmup_batches = config['train']['warmup_epochs'] * (
        config['train']['train_times'] * len(train_generator))

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))
    print('multi_gpu:' + str(multi_gpu))

    train_model, infer_model = create_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=max_box_per_image,
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=warmup_batches,
        ignore_thresh=config['train']['ignore_thresh'],
        multi_gpu=multi_gpu,
        saved_weights_name=config['train']['saved_weights_name'],
        lr=config['train']['learning_rate'],
        grid_scales=config['train']['grid_scales'],
        obj_scale=config['train']['obj_scale'],
        noobj_scale=config['train']['noobj_scale'],
        xywh_scale=config['train']['xywh_scale'],
        class_scale=config['train']['class_scale'],
        backend=config['model']['backend'])

    ###############################
    #   Kick off the training
    ###############################
    callbacks = create_callbacks(config['train']['saved_weights_name'],
                                 config['train']['tensorboard_dir'],
                                 infer_model)

    train_model.fit(
        x=train_generator,
        validation_data=valid_generator,
        steps_per_epoch=len(train_generator) * config['train']['train_times'],
        epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        workers=4,
        max_queue_size=8,
        callbacks=callbacks)

    train_model.load_weights(config['train']['saved_weights_name'])
    infer_model.save(config['train']['saved_weights_name'])
    ###############################
    #   Run the evaluation
    ###############################
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    total_instances = []
    precisions = []
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              labels[label],
              'with average precision: {:.4f}'.format(average_precision))
        total_instances.append(num_annotations)
        precisions.append(average_precision)

    if sum(total_instances) == 0:
        print('No test instances found.')
        return

    print('mAP using the weighted average of precisions among classes: {:.4f}'.
          format(
              sum([a * b for a, b in zip(total_instances, precisions)]) /
              sum(total_instances)))
    print('mAP: {:.4f}'.format(
        sum(precisions) / sum(x > 0 for x in total_instances)))
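
This example reports two aggregates: an instance-weighted mAP and a plain mean over the classes that actually have annotations. A worked example with made-up numbers shows how they differ:

# Hypothetical per-class results, for illustration only.
total_instances = [100, 0, 50]        # annotations per class
precisions      = [0.90, 0.00, 0.60]  # AP per class

weighted = (sum(a * b for a, b in zip(total_instances, precisions)) /
            sum(total_instances))
print('weighted mAP: {:.4f}'.format(weighted))  # (90 + 0 + 30) / 150 = 0.8000

unweighted = sum(precisions) / sum(x > 0 for x in total_instances)
print('mAP: {:.4f}'.format(unweighted))         # (0.9 + 0.6) / 2 = 0.7500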
Beispiel #32
0
def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    train_ints, valid_ints, labels, max_box_per_image = create_training_instances(
        config['train']['train_annot_folder'],
        config['train']['train_image_folder'], config['train']['cache_name'],
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['valid']['cache_name'],
        config['model']['labels'])
    print('\nTraining on: \t' + str(labels) + '\n')

    ###############################
    #   Create the generators
    ###############################
    train_generator = BatchGenerator(
        instances=train_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.3,
        norm=normalize)

    valid_generator = BatchGenerator(
        instances=valid_ints,
        anchors=config['model']['anchors'],
        labels=labels,
        downsample=32,  # ratio between network input's size and network output's size, 32 for YOLOv3
        max_box_per_image=max_box_per_image,
        batch_size=config['train']['batch_size'],
        min_net_size=config['model']['min_input_size'],
        max_net_size=config['model']['max_input_size'],
        shuffle=True,
        jitter=0.0,
        norm=normalize)

    ###############################
    #   Create the model
    ###############################
    if os.path.exists(config['train']['saved_weights_name']):
        config['train']['warmup_epochs'] = 0
    warmup_batches = config['train']['warmup_epochs'] * (
        config['train']['train_times'] * len(train_generator))

    os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
    multi_gpu = len(config['train']['gpus'].split(','))

    train_model, infer_model = create_model(
        nb_class=len(labels),
        anchors=config['model']['anchors'],
        max_box_per_image=max_box_per_image,
        max_grid=[
            config['model']['max_input_size'],
            config['model']['max_input_size']
        ],
        batch_size=config['train']['batch_size'],
        warmup_batches=warmup_batches,
        ignore_thresh=config['train']['ignore_thresh'],
        multi_gpu=multi_gpu,
        saved_weights_name=config['train']['saved_weights_name'],
        lr=config['train']['learning_rate'],
        grid_scales=config['train']['grid_scales'],
        obj_scale=config['train']['obj_scale'],
        noobj_scale=config['train']['noobj_scale'],
        xywh_scale=config['train']['xywh_scale'],
        class_scale=config['train']['class_scale'],
    )

    ###############################
    #   Kick off the training
    ###############################
    callbacks = create_callbacks(config['train']['saved_weights_name'],
                                 config['train']['tensorboard_dir'],
                                 infer_model)

    train_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=len(train_generator) * config['train']['train_times'],
        epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],
        verbose=2 if config['train']['debug'] else 1,
        callbacks=callbacks,
        workers=4,
        max_queue_size=8)

    # make a GPU version of infer_model for evaluation
    if multi_gpu > 1:
        infer_model = load_model(config['train']['saved_weights_name'])

    ###############################
    #   Run the evaluation
    ###############################
    # compute mAP for all the classes
    average_precisions = evaluate(infer_model, valid_generator)

    # print the score
    for label, average_precision in average_precisions.items():
        print(labels[label] + ': {:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
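
Beispiel #32 is nearly identical to Beispiel #31 but still uses Model.fit_generator, which is deprecated in TensorFlow 2.x; Model.fit accepts a keras.utils.Sequence directly, so the training call earlier in this example can be migrated as sketched below (mirroring the call in Beispiel #31):

# Equivalent TF 2.x call: fit() consumes the Sequence directly, making
# fit_generator unnecessary.
train_model.fit(
    x=train_generator,
    steps_per_epoch=len(train_generator) * config['train']['train_times'],
    epochs=config['train']['nb_epochs'] + config['train']['warmup_epochs'],
    verbose=2 if config['train']['debug'] else 1,
    callbacks=callbacks,
    workers=4,
    max_queue_size=8)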
Beispiel #33
0
def main():
    text = 'Description of the program arguments'

    parser = argparse.ArgumentParser(description=text)
    parser.add_argument("--train_path",
                        "-t",
                        help="set path where to find training data")
    parser.add_argument("--train_start",
                        help="start data fraction for train data (default 0)")
    parser.add_argument("--train_end",
                        help="end data fraction for train data (default 1)")
    parser.add_argument("--test_path",
                        "-T",
                        help="set path where to find test data")
    parser.add_argument("--test_start",
                        help="start data fraction for test data (default 0)")
    parser.add_argument("--test_end",
                        help="end data fraction for test data (default 1)")
    parser.add_argument("--embedding_path",
                        "-e",
                        help="set path where to find french word embeddings")
    parser.add_argument(
        "--mode",
        "-m",
        help="set mode:\n - 'prediction' / 'p': predict only\n - 'evaluation' / 'e': predict and "
        "evaluate predictions")
    parser.add_argument(
        "--output_path",
        "-o",
        help="set path where to write predictions (if None nothing will be written)")
    parser.add_argument(
        "--beam",
        "-b",
        help="set beam search size for cyk algorithm (default 10)")

    args = parser.parse_args()

    def change_none(x, val):
        return val if x is None else x

    train_path = args.train_path
    train_start = float(change_none(args.train_start, 0))
    train_end = float(change_none(args.train_end, 1))
    test_path = args.test_path
    test_start = float(change_none(args.test_start, 0))
    test_end = float(change_none(args.test_end, 1))
    embedding_path = args.embedding_path
    mode = args.mode
    beam = int(change_none(args.beam, 10))
    output_path = args.output_path

    assert mode in ('prediction', 'evaluation', 'e', 'p'), mode

    print("#" * 100 + '\n##')
    print('##\t- Build grammar from file: %s' % train_path)
    print('##\t- Build oov module from embeddings stored in: %s' %
          embedding_path)
    print('##\t- Make {} based on cyk (beam: {}) on sentences in file: {}'.
          format(
              'predictions' if mode in ('prediction', 'p') else 'evaluations',
              beam, test_path))
    if output_path is not None:
        print('##\t- Store predictions in file: %s' % output_path)
    else:
        print("## Don't save predictions")
    print('##\n' + "#" * 100)

    train_vocabulary, train_grammar_rules, train_rhs_index, train_unary_dic, train_prob_lexicon = get_train_data(
        train_path, train_start, train_end)

    if mode in ('prediction', 'p'):
        test_sentences = get_to_predict_data(test_path, test_start, test_end)
    elif mode in ('evaluation', 'e'):
        test_sentences, test_labels = get_to_eval_data(test_path, test_start,
                                                       test_end)
    else:
        raise ValueError(
            "Should be 'prediction', 'p', 'evaluation' or 'e', not %s" % mode)
    # load French word embeddings
    fr_words, embeddings, word_id, id_word = get_embeddings(embedding_path)

    # Normalize digits by replacing them with #
    DIGITS = re.compile("[0-9]", re.UNICODE)

    # considered transformations when looking for in vocabulary words
    TRANSFOS = [
        lambda w: DIGITS.sub("#", w), lambda w: w.lower(), lambda w: w.upper(),
        lambda w: w.title()
    ]
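    # For example, DIGITS.sub("#", "année 2019") -> 'année ####', and
    # w.title() turns 'paris' into 'Paris'.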

    train_embeddings, train_word_id, voc_id_word = process_embeddings(
        word_embeddings=embeddings,
        word_id_dic=word_id,
        vocabulary=train_vocabulary,
        re_rules=[lambda s: DIGITS.sub("#", s)])

    def oov_handler(word):
        return oov(word,
                   train_vocabulary,
                   fr_words,
                   all_embs=embeddings,
                   all_word_id_dic=word_id,
                   voc_embs=train_embeddings,
                   voc_id_word_dic=voc_id_word,
                   transformations=TRANSFOS,
                   k=2)

    print("Vocabulary-specific embedding shape is {}".format(
        train_embeddings.shape))

    if mode in ('evaluation', 'e'):
        parsed_str, score, parsed = evaluate(test_sentences,
                                             test_labels,
                                             train_grammar_rules,
                                             train_prob_lexicon,
                                             train_rhs_index,
                                             train_unary_dic,
                                             oov_handler,
                                             p_output=True,
                                             beam=beam,
                                             chrono=True)
    elif mode in ('prediction', 'p'):
        parsed_str, parsed = predict(test_sentences,
                                     train_grammar_rules,
                                     train_prob_lexicon,
                                     train_rhs_index,
                                     train_unary_dic,
                                     oov_handler,
                                     p_output=True,
                                     beam=beam,
                                     chrono=True)
    if output_path is not None:
        print("Write predictions in %s..." % output_path, end=' ')
        write_in_file(output_path, parsed_str)
        print('Done')
Beispiel #34
0
    os.makedirs(os.path.join(opt.output_dir, "models"), exist_ok=True)
    os.makedirs(os.path.join(opt.output_dir, "tensorboard"), exist_ok=True)
    writer = SummaryWriter(log_dir=os.path.join(opt.output_dir, "tensorboard"))

    # ----------
    #  Training
    # ----------
    if opt.train:
        train(model_G=model_G,
              model_D=model_D,
              embedder=encoder,
              optimizer_G=optimizer_G,
              optimizer_D=optimizer_D,
              scheduler_G=scheduler_G,
              scheduler_D=scheduler_D,
              train_loader=train_dataloader,
              val_loader=val_dataloader,
              adv_loss=adversarial_loss,
              opt=opt,
              onehot_encoder=onehot_encoder)
    else:
        assert opt.model_checkpoint is not None, 'no model checkpoint specified'
        print("Loading model from state dict...")
        load_model(opt.model_checkpoint, model_G)
        print("Model loaded.")
        evaluate(model_G=model_G,
                 embedder=encoder,
                 test_loader=val_dataloader,
                 device=device,
                 p=opt.model_checkpoint)
Beispiel #35
0
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch + 1, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn(
                        'Can save best model only with %s available, '
                        'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print(
                                '\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                                ' saving model to %s' %
                                (epoch + 1, self.monitor, self.best, current,
                                 filepath))
                        self.best = current
                        # print(self.save_weights_only)
                        if self.save_weights_only:
                            self.model_to_save.save_weights(filepath,
                                                            overwrite=True)
                        else:
                            self.model_to_save.save(filepath, overwrite=True)

                    else:
                        if self.verbose > 0:
                            print(
                                '\nEpoch %05d: %s did not improve from %0.5f' %
                                (epoch + 1, self.monitor, self.best))
            # # normal save
            # if self.verbose > 0:
            #     print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
            # if self.save_weights_only:
            #     self.model_to_save.save_weights(filepath, overwrite=True)
            # else:
            #     self.model_to_save.save(filepath, overwrite=True)
            else:
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' %
                          (epoch + 1, filepath))
                if self.save_weights_only:
                    self.model_to_save.save_weights(filepath, overwrite=True)
                else:
                    self.model_to_save.save(filepath, overwrite=True)
        if self.addtion_save:
            if (epoch + 1) % 5 == 0 and (epoch + 1) > 19:
                average_precisions = evaluate(self.model_to_save,
                                              self.valid_data)
                ap = []
                print('[INFO] Epoch: %05d' % (epoch + 1))
                # print the mAP score
                for label, average_precision in average_precisions.items():
                    print('\n' + self.labels[label] +
                          ' average precision(AP): {:.6f}'.format(
                              average_precision['ap']))
                    ap.append(average_precision['ap'])
                    print(
                        self.labels[label] +
                        ' recall: {:.6f}'.format(average_precision['recall']))
                    print(self.labels[label] + ' precision: {:.6f}'.format(
                        average_precision['precision']))
                mAP = sum(ap) / len(ap)
                print('[INFO] mAP: {:.6f}'.format(mAP))
                if self.best_mAP < mAP:
                    print(
                        '[INFO] Best mAP improve from {:.6f} to {:.6f}'.format(
                            self.best_mAP, mAP))
                    self.best_mAP = mAP
                    self.model_to_save.save(
                        str(self.addtion_save).split('.')[0] + '_mAP_best.h5',
                        overwrite=True)
                else:
                    print('[INFO] Best mAP did not improve from {:.6f}'.format(
                        self.best_mAP))
            if (epoch + 1) % 10 == 0:
                self.model_to_save.save(str(self.addtion_save).split('.')[0] +
                                        '_%04d.h5' % (epoch + 1),
                                        overwrite=True)
        super(CustomModelCheckpoint, self).on_epoch_end(epoch, logs)
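
Since CustomModelCheckpoint's __init__ is not shown, a hedged usage sketch is given below; the argument and attribute names are inferred from what on_epoch_end reads (filepath, monitor, period, addtion_save, model_to_save, valid_data, labels, best_mAP) and may not match the real constructor:

# Hedged usage sketch; names are inferred from the callback body above.
checkpoint = CustomModelCheckpoint(
    filepath='yolo3_{epoch:02d}.h5',
    monitor='loss',
    save_best_only=True,
    save_weights_only=False,
    period=1,
    addtion_save='yolo3.h5')   # enables the periodic mAP-evaluation branch
checkpoint.model_to_save = infer_model   # model that is evaluated and saved
checkpoint.valid_data = valid_generator  # generator handed to evaluate()
checkpoint.labels = labels
checkpoint.best_mAP = 0.0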