Example #1
def main(mode: str, epochs: int, weights_name: str):
    if mode == "train":
        print("Train mode was chosen.")
        test_split = 0.05
        train(epochs=epochs, save_model=True, test_split=test_split)
        # Use the weights just trained to run a prediction on the test samples to see how the model performs.
        predict(model_file_name="model", test_split=test_split)
    elif mode == "predict":
        print("Predict mode was chosen.")
        predict(model_file_name=weights_name, test_split=0.05)
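A hedged sketch of how this dispatcher might be invoked from the command line; the argparse flags and defaults below are assumptions, not part of the original example:

import argparse

if __name__ == "__main__":
    # Hypothetical CLI wiring for main(); flag names and defaults are illustrative
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", choices=["train", "predict"], default="train")
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--weights-name", default="model")
    args = parser.parse_args()
    main(mode=args.mode, epochs=args.epochs, weights_name=args.weights_name)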
Example #2
def run():
    query = request.args.get('query')
    if query is None:
        return jsonify({"error": "Please specify query!"})
    else:
        prediction = predict.predict(query)
    return jsonify(prediction)
def main():
    # If someone simply goes to the webpage, open the html for that webpage
    if flask.request.method == 'GET':
        return (flask.render_template('hashtag_project.html'))

    # if someone submits a picture, extract the deep features, run through the model
    # return the input and the predictions
    if flask.request.method == 'POST':
        # get the input picture
        if 'file' not in flask.request.files:
            flask.flash('no file part')
            # if there is no input picture, redirect back to the original url
            return flask.redirect(flask.request.url)
        # get file
        file = flask.request.files['file']
        # get the number of hashtags to return (form values arrive as strings)
        n = int(flask.request.form.get('n'))
        # create secure file name
        filename = secure_filename(file.filename)
        # save file to uploads folder
        path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(path)
        features = predict(path)
        prediction = predict_hashtags(input_features=features, n=n)

        return flask.render_template(
            'hashtag_project.html',
            filename=filename,
            result=prediction,
        )
Example #4
def run(action, root_dir, data_key, model_key, save_path):
    run_name = "_".join((model_key, data_key))
    config = init_wandb_session(run_name, action)

    criterion = CrossEntropyLoss()
    model_path = join(save_path, run_name + '.pt')

    if action == 'train':
        logging.info("Training the model: {} with dataset: {}".format(model_key, data_key))
        train_set, val_set = get_dataset(data_key, root_dir, True)
        model = NETWORKS[model_key](input_filters=train_set[0][0].shape[0], num_classes=len(train_set.classes))
        model.to(run_device)
        train_loader = DataLoader(train_set, batch_size=config.batch_size, shuffle=True)
        val_loader = DataLoader(val_set, batch_size=config.batch_size, shuffle=False)
        optimizer = SGD(model.parameters(), lr=config.learning_rate, momentum=config.momentum)
        scheduler = ReduceLROnPlateau(optimizer, 'min', min_lr=config.min_learning_rate, patience=10)
        makedirs(save_path, exist_ok=True)
        train(model, train_loader, val_loader, criterion, optimizer, scheduler, config.num_epochs, model_path)
    else:
        logging.info("Testing the model: {} with dataset: {}".format(model_key, data_key))
        model = torch.load(model_path)
        test_set = get_dataset(data_key, root_dir, False)
        test_loader = DataLoader(test_set, batch_size=config.batch_size, shuffle=False)
        metrics = predict(model, test_loader, criterion)
        log_pred_metrics(metrics)
Example #5
def predict_route():
    result = predict(flask.request.json)

    response = flask.Response(json.dumps(result, cls=NpEncoder))
    response.headers['content-type'] = 'application/json'

    return response
def select_spacing(input_, model, type_='konlpy'):
    if type_.lower() == 'konlpy':
        return getNouns(input_)
    elif type_.lower() == 'self_product':
        return predict(input_, model=model)
    else:
        raise ValueError("only 'konlpy' and 'self_product' models are supported")
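A usage sketch for the selector above; the input strings and loaded_model are illustrative placeholders:

# Hypothetical usage; the 'konlpy' branch ignores the model argument
nouns = select_spacing('input text', model=None, type_='konlpy')
tokens = select_spacing('input text', model=loaded_model, type_='self_product')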
Example #7
def home():
    result = None
    if request.method == "POST":
        query = request.form.get('query')
        result = predict.predict(query)
    print(f"before rendering: result = {result}")
    return render_template("home.html", feedback=result)
Example #8
    def predict(self):
        # Calls the module-level predict(), not this method
        result = predict(self.currentImagePath)
        # Fill the progress bar; this loop completes instantly rather than
        # tracking real progress
        for i in range(101):
            self.progressBar.setValue(i)

        if not result:
            result = 'Error'
        self.labelPrediccion.setText(result)
Example #9
def predict_images():

    data = request.files.get("file")
    if data is None:
        return 'Got Nothing'
    else:
        prediction = predict.predict(data)

    return json.dumps(str(prediction))
Example #10
    def post(self):
        postedData = request.get_json()

        encodeString = postedData["encodeString"]

        try:
            result = predict(encodeString)
            return {"status": 200, "result": result}
        except Exception:
            abort(400, "Bad request, predict failed.")
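This post handler has the shape of a Flask-RESTful Resource method; a minimal sketch of the wiring such a resource typically needs (the class name and route path are assumptions):

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class Predict(Resource):  # hypothetical class name
    def post(self):
        ...  # body as in the example above

api.add_resource(Predict, '/predict')  # route path is illustrative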
Example #11
def main():
    final_model = train_model()
    predictions_df = predict(final_model)
    print(predictions_df)
    driver_alerts = set(
        predictions_df[predictions_df['alert']]['driver_id'])
    for driver in driver_alerts:
        send_sms(driver)


if __name__ == '__main__':
    main()
Example #12
def update_histogram(datePicked, selection, n_clicks):
    if n_clicks is None:
        return {}

    model = load_model()
    df_predict = predict(model, sms=True)
    minima = min(df_predict['predictions'])
    maxima = max(df_predict['predictions'])

    norm = matplotlib.colors.Normalize(vmin=minima, vmax=maxima, clip=True)
    mapper = cm.ScalarMappable(norm=norm, cmap=cm.coolwarm)
    colors = [matplotlib.colors.to_hex(mapper.to_rgba(v))
              for v in df_predict['predictions']]

    layout = go.Layout(
        bargap=0.01,
        bargroupgap=0,
        barmode="group",
        margin=go.layout.Margin(l=10, r=0, t=0, b=50),
        showlegend=False,
        plot_bgcolor="#323130",
        paper_bgcolor="#323130",
        dragmode="select",
        font=dict(color="white"),
        xaxis=dict(
            range=[0.5, len(df_predict)+0.5],
            showgrid=False,
            nticks=len(df_predict),
            fixedrange=True,
            tickprefix='Driver #'
        ),
        yaxis=dict(
            range=[0, maxima],
            showticklabels=False,
            showgrid=False,
            fixedrange=True,
            rangemode="nonnegative",
            zeroline=False,
        ),
    )

    return go.Figure(
        data=[
            go.Bar(x=df_predict['driver_id'],
                   y=df_predict['predictions'],
                   marker=dict(color=colors),
                   hovertext=df_predict['priority']),
        ],
        layout=layout,
    )
Example #13
def train(model, train_loader, val_loader, criterion, optimizer, scheduler,
          num_epochs, model_path):
    # wandb.watch(model, log='all')
    val_loss_min = Inf

    for epoch in range(1, num_epochs + 1):
        train_metrics = train_model(model, train_loader, criterion, optimizer)
        val_metrics = predict(model, val_loader, criterion)
        log_metrics(epoch, train_metrics, val_metrics)
        scheduler.step(val_metrics['loss'])
        val_loss_min = save_model(model, model_path, val_metrics['loss'],
                                  val_loss_min)

    wandb.save(model_path)
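The loop depends on a save_model helper that is not shown; a minimal sketch under the assumption that it checkpoints only on improvement, saving the full model so that Example #4 can reload it with torch.load:

import torch

def save_model(model, model_path, val_loss, val_loss_min):
    # Hypothetical helper: save the whole model when validation loss improves
    # and return the updated best loss
    if val_loss < val_loss_min:
        torch.save(model, model_path)
        return val_loss
    return val_loss_min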
Example #14
def classify_img():
    if request.method == 'OPTIONS':
        return preflight_res()

    req_body = request.get_json(force=True)
    img_data = req_body['image']
    img_arr = np.frombuffer(b64decode(img_data), np.uint8)
    # imdecode takes an imread flag, not a color-conversion code:
    # decode to BGR first, then convert to RGB
    img_bgr = cv2.imdecode(img_arr, cv2.IMREAD_COLOR)
    img_arr = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    activation = predict(img_arr)

    if activation >= 0.5:
        animal = "cat"
    else:
        animal = "dog"

    res_body = {'animal': animal, 'activation': str(activation)}
    return create_res(res_body)
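A client-side sketch for exercising this endpoint; the URL, port, and file name are illustrative assumptions:

import base64
import requests

# Hypothetical client; the endpoint URL is illustrative
with open('cat.jpg', 'rb') as fh:
    payload = {'image': base64.b64encode(fh.read()).decode('ascii')}
resp = requests.post('http://localhost:5000/classify', json=payload)
print(resp.text)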
def detect_on_dataset_ui(model):
    sg.theme(cfg.UI.THEME)
    layout = [
        [sg.Text('Dataset Folder Path:')],
        [
            sg.Input(key='-INPUT-PATH-', enable_events=True),
            sg.FolderBrowse(initial_folder='./')
        ], [sg.Text('Prediction Output Path:')],
        [sg.Input(key='-OUTPUT-PATH-'),
         sg.FolderBrowse(initial_folder='./')],
        [sg.Text('Annotation Output Path')],
        [sg.Input(key='-ANNO-PATH-'),
         sg.FileSaveAs(initial_folder='./')], [sg.Text('Input File Type:')],
        [
            sg.Combo(('.jpg', '.png', '.bmp', '.tif', '.tiff', '.gif'),
                     readonly=True,
                     default_value='.jpg',
                     size=(15, 1),
                     key='-FILE-TYPE-')
        ],
        [
            sg.Text('Confidence Threshold'),
            sg.Slider((0, 1), 0.4, 0.05, orientation='h', key='-CONF-SLIDER-'),
            sg.Text('NMS IOU Threshold'),
            sg.Slider((0, 1), 0.45, 0.05, orientation='h', key='-NMS-SLIDER-')
        ], [sg.B('Detect'), sg.B('Exit')]
    ]
    window = Window('Detect Dataset', layout, font=(cfg.UI.FONT, 12))
    while True:
        event, value = window.read()

        if event in ['Exit', None]:
            break
        # Update output path and annotation file path according to input path
        if event in ['-INPUT-PATH-']:
            window['-OUTPUT-PATH-'].update(
                os.path.join(value['-INPUT-PATH-'], 'Prediction'))
            window['-ANNO-PATH-'].update(
                os.path.join(value['-INPUT-PATH-'], 'annotation.txt'))
        # Start detection
        if event in ['Detect']:
            input_path = value['-INPUT-PATH-']
            output_path = value['-OUTPUT-PATH-']
            anno_path = value['-ANNO-PATH-']
            file_type = value['-FILE-TYPE-']
            nms_iou = value['-NMS-SLIDER-']
            conf_thresh = value['-CONF-SLIDER-']
            anno_file = None
            try:
                anno_file = open(anno_path, 'w')
                images = os.listdir(input_path)
                if not os.path.exists(output_path):
                    os.mkdir(output_path)
                # Count files matching the given file type
                max_process = [x.endswith(file_type)
                               for x in images].count(True)
                cur_process = 1
                window.disable()

                for image in images:
                    if image.endswith(file_type):
                        # If the user cancels via the progress popup
                        if not sg.one_line_progress_meter(
                                'Generating Prediction',
                                cur_process,
                                max_process,
                                key='-PROGRESS-'):
                            break

                        img_RGB = io.imread(os.path.join(input_path, image))
                        img = cv2.cvtColor(img_RGB, cv2.COLOR_RGB2BGR)
                        # Predict
                        bboxes, _ = predict(img_RGB, model, nms_iou,
                                            conf_thresh)
                        pred_img = util.draw_bbox(img.copy(), bboxes)
                        cv2.imwrite(os.path.join(output_path, image), pred_img)
                        anno_line = util.encode_annotation(bboxes, image)
                        anno_file.write(anno_line.strip() + '\n')
                        cur_process += 1

            except IOError as e:
                sg.PopupError("Invalid input file,\n{}".format(e))
                sg.one_line_progress_meter('Generating Prediction',
                                           1,
                                           1,
                                           key='-PROGRESS-')
            finally:
                if anno_file is not None:
                    anno_file.close()
                window.enable()

            sg.popup_ok("Detection finished", title="Finish")

    window.close()
    del window
    del layout
Example #16
def update_graph(datePicked, selectedData, selectedLocation, n_clicks):
    if n_clicks is None:
        return {}
    zoom = 13.0
    latInitial = 42.3601
    lonInitial = -71.0942
    bearing = 0

    if selectedLocation:
        zoom = 15.0
        latInitial = list_of_locations[selectedLocation]["lat"]
        lonInitial = list_of_locations[selectedLocation]["lon"]

    model = load_model()
    df_predict = predict(model)

    minima = min(df_predict['predictions'])
    maxima = max(df_predict['predictions'])

    norm = matplotlib.colors.Normalize(vmin=minima, vmax=maxima, clip=True)
    mapper = cm.ScalarMappable(norm=norm, cmap=cm.coolwarm)
    colors = []
    for v in df_predict['predictions']:
        color = matplotlib.colors.to_hex(mapper.to_rgba(v))
        colors.append(color)
    sizes = list((10 + df_predict['predictions']*100).astype(int))

    return go.Figure(
        data=[
            Scattermapbox(
                lat=[list_of_locations[i]["lat"] for i in list_of_locations],
                lon=[list_of_locations[i]["lon"] for i in list_of_locations],
                mode="markers",
                hoverinfo="text",
                text=[i for i in list_of_locations],
                marker=dict(size=sizes, color=colors),
            ),
        ],
        layout=Layout(
            autosize=True,
            margin=go.layout.Margin(l=0, r=35, t=0, b=0),
            showlegend=False,
            mapbox=dict(
                accesstoken=mapbox_access_token,
                center=dict(lat=latInitial, lon=lonInitial),
                style="dark",
                bearing=bearing,
                zoom=zoom,
            ),
            updatemenus=[
                dict(
                    buttons=(
                        [
                            dict(
                                args=[
                                    {
                                        "mapbox.zoom": 12,
                                        "mapbox.center.lon": "-73.991251",
                                        "mapbox.center.lat": "40.7272",
                                        "mapbox.bearing": 0,
                                        "mapbox.style": "dark",
                                    }
                                ],
                                label="Reset Zoom",
                                method="relayout",
                            )
                        ]
                    ),
                    direction="left",
                    pad={"r": 0, "t": 0, "b": 0, "l": 0},
                    showactive=False,
                    type="buttons",
                    x=0.45,
                    y=0.02,
                    xanchor="left",
                    yanchor="bottom",
                    bgcolor="#323130",
                    borderwidth=1,
                    bordercolor="#6d6d6d",
                    font=dict(color="#FFFFFF"),
                )
            ],
        ),
    )
Example #17
def parse_arguments():
    # Reconstructed wrapper: the listing was clipped, so the parser setup is an
    # assumption matching the arguments consumed below
    # (args.mode, args.config_file_path)
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'predict'], required=True)
    parser.add_argument('--config_file_path', required=True)
    args_ = parser.parse_args()

    return args_


if __name__ == '__main__':
    # Parse script arguments
    args = parse_arguments()

    # Parse yaml config file parameters

    with open(args.config_file_path) as yaml_file:
        config_params = yaml.load(yaml_file, Loader=yaml.FullLoader)

    if args.mode == 'train':
        logging.basicConfig(filename='training.log', level=logging.INFO, filemode='w')
        train.train(data_path_source_dir_=config_params['data_params']['data_dir_path'],
                    training_params=config_params['training_params'],
                    model_params=config_params['model_params'])

    elif args.mode == 'predict':
        logging.basicConfig(filename='prediction.log', level=logging.INFO, filemode='w')
        predict.predict(data_path_source_dir_=config_params['data_params']['data_dir_path'],
                        training_params=config_params['training_params'],
                        model_params=config_params['model_params'])

    else:
        raise Exception('Script can only be run with --mode `train` or `predict`')
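For reference, config_params then behaves like the nested dict sketched below; only data_dir_path is confirmed by the keys this script reads, and the inner contents of the other sections are assumptions:

# Shape of config_params after yaml.load; entries inside training_params and
# model_params are assumptions beyond what this script reads
config_params = {
    'data_params': {'data_dir_path': 'path/to/data'},
    'training_params': {},  # forwarded to train.train / predict.predict
    'model_params': {},     # forwarded to train.train / predict.predict
}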
Example #18
arg_parse.add_argument('-m',
                       '--modelname',
                       required=False,
                       help='choose from lr or svm',
                       default='svm')
arg_parse.add_argument('-f',
                       '--inputfile',
                       required=False,
                       help='path to data file (.csv)',
                       default='model/model_data/default_test.csv')

args = vars(arg_parse.parse_args())

script_name = args['script_name']
model_name = args['modelname']
inputfile = args['inputfile']
args = None

if script_name == 'Predict':
    predict(model_name, inputfile)
elif script_name == 'Train':
    if inputfile is None:
        raise Exception('Input csv trained data file is missing.')
    model_train(model_name, inputfile)
elif script_name == 'Evaluate':
    inputfile = 'model/model_data/eval.csv'
    evalaute(model_name, inputfile)
elif script_name is None:
    # argparse already reports missing arguments, but print a hint as well
    print(
        'Please provide a valid method to run from [Predict, Train, Evaluate]')
Example #19
def do_predict():
    result = predict()
    return result
Example #20
File: routes.py Project: TilakSN/guessture
def guess():
    file = request.files['video-file']
    path = os.path.join('uploads', secure_filename(file.filename))
    file.save(path)
    output = predict(model, path)
    return jsonify({'result': output})
def realtime_detect_ui(model):
    sg.theme(cfg.UI.THEME)
    layout = [[sg.Image('', key='-SCREEN-')],
              [sg.Text('', size=(30, 1), key='-PROCESS-TIME-')],
              [
                  sg.Text('Confidence Threshold'),
                  sg.Slider((0, 1),
                            0.4,
                            0.05,
                            orientation='h',
                            key='-CONF-SLIDER-'),
                  sg.Text('NMS IOU Threshold'),
                  sg.Slider((0, 1),
                            0.45,
                            0.05,
                            orientation='h',
                            key='-NMS-SLIDER-')
              ],
              [
                  sg.Button('Record', size=(10, 1), key='-REC-'),
                  sg.Button('Collect', size=(10, 1), key='-COL-'),
                  sg.Button('Close', size=(10, 1))
              ]]
    cam_no = sg.popup_get_text('Detection device number:', default_text='0')
    if cam_no is None:
        return

    # Initialise resource variables
    cam = cv2.VideoCapture()
    window = Window('Realtime Detect',
                    layout=layout,
                    finalize=True,
                    font=cfg.UI.FONT)
    record = False
    collect = False
    anno_file = None
    collect_path = None
    out = None
    anno_index = 1

    try:
        cam_no = int(cam_no)
        cam.open(cam_no)
        while True:
            event, value = window.read(timeout=0)
            flag, img = cam.read()
            if event in [None, 'Close']:
                break
            # Record button pushed
            if event in ['-REC-']:
                record = not record
                window['-REC-'].update(text=('Stop' if record else 'Record'))
            # Collect button pushed
            if event in ['-COL-']:
                if collect:
                    anno_file.flush()
                collect = not collect
                window['-COL-'].update(text=('Stop' if collect else 'Collect'))

            # Read from Webcam successfully
            if flag:
                # Predict bboxes
                img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                bboxes, exe_time = predict(img_RGB, model,
                                           value['-NMS-SLIDER-'],
                                           value['-CONF-SLIDER-'])
                # Draw bbox to img
                pred_img = util.draw_bbox(img.copy(), bboxes)

                # Record video to the file
                if record:
                    if out is None:
                        out = cv2.VideoWriter(
                            time.strftime('%Y%m%d-%H%M',
                                          time.localtime(time.time())) +
                            '.mp4', -1, 20.0, (640, 480))
                    out.write(pred_img)

                # Collect image and annotation
                if collect:
                    if collect_path is None:
                        collect_path = './anno_data/collected_data_' + time.strftime(
                            '%Y%m%d-%H%M', time.localtime(time.time()))
                        os.mkdir(collect_path)
                        os.mkdir(os.path.join(collect_path, 'img'))
                    if anno_file is None:
                        anno_file = open(
                            os.path.join(collect_path, 'annotation.txt'), 'a')
                        anno_index = 1
                    img_path = os.path.join('img', str(anno_index) + '.jpg')
                    cv2.imwrite(os.path.join(collect_path, img_path), img)
                    anno_line = util.encode_annotation(bboxes, img_path)
                    anno_file.write(anno_line.strip() + '\n')
                    anno_index += 1
                # Display predicted image
                img_bytes = cv2.imencode('.png', pred_img)[1].tobytes()
                window['-SCREEN-'].update(data=img_bytes)
                window['-PROCESS-TIME-'].update(
                    value='Process time: {}ms'.format(int(exe_time * 1000)))
            else:
                raise ValueError  # camera frame read failed; reported via the popup below

    except ValueError:
        sg.popup_error("Selected Device is unavailable")
        return
    finally:
        cam.release()
        if out is not None:
            out.release()
        if anno_file is not None:
            anno_file.close()
        window.close()
        del cam
        del window
        del layout
Example #22
def digitPrediction():
    imageURL = request.form.get('imageURL', False)
    prediction = predict(imageURL)
    print(prediction)
    return str(prediction)
def run_test(src_lang,
             num_batches,
             args,
             domain_lang=None,
             slot_lang=None,
             model=None,
             evaluator=None,
             all_slot_list=None,
             test_data=None,
             is_eval=False):

    predictions = {}
    latencies = []
    src_lens = []
    tgt_lens = []
    oracle_predictions = {}
    joint_gate_matches = 0
    joint_lenval_matches = 0
    total_samples = 0

    if args['pointer_decoder']:
        predict_lang = src_lang

    if is_eval:
        predictions = {}

    for i in tqdm(range(0, num_batches)):
        start_range = i * args["eval_batch"]
        end_range = (i + 1) * args["eval_batch"]
        batch_data, feed_dict = make_test_data_set(
            start_range,
            end_range,
            test_data,
            model=model,
            slot_gating=args['slot_gating'])

        _gs, _losses, _nb_tokens, _state_out, _evaluation_variable = model.sess.run(
            [
                model.global_step, model.losses, model.nb_tokens,
                model.state_out, model.evaluation_variable
            ],
            feed_dict=feed_dict)

        if is_eval:
            predictions, latencies, src_lens, tgt_lens = predict(
                _state_out,
                _evaluation_variable,
                predict_lang,
                domain_lang,
                slot_lang,
                predictions,
                False,
                src_lang,
                args,
                feed_dict_func=make_feed_dict,
                batch_data=batch_data,
                model=model,
                slot_list=all_slot_list,
                latency=latencies,
                src_lens=src_lens,
                tgt_lens=tgt_lens,
                test=True)

            matches, oracle_predictions = predict(_state_out,
                                                  _evaluation_variable,
                                                  predict_lang,
                                                  domain_lang,
                                                  slot_lang,
                                                  oracle_predictions,
                                                  True,
                                                  src_lang,
                                                  args,
                                                  batch_data=batch_data,
                                                  test=True)

            joint_lenval_matches += matches['joint_lenval']
            joint_gate_matches += matches['joint_gate']
            total_samples += len(batch_data['turn_id'])

    avg_latencies = sum(latencies) / len(latencies)
    print("Average latency: {}".format(avg_latencies))
    with open(args['path'] + '/latency_eval.csv', 'w') as f:
        f.write(str(avg_latencies))
    # zip objects are not picklable in Python 3, so materialise as a list
    with open(args['path'] + '/latency_out.pkl', 'wb') as f:
        pkl.dump(list(zip(latencies, src_lens, tgt_lens)), f)
    joint_acc_score, F1_score, turn_acc_score = evaluator.evaluate_metrics(
        predictions, 'test')
    oracle_joint_acc, oracle_f1, oracle_acc = evaluator.evaluate_metrics(
        oracle_predictions, 'test')
    joint_lenval_acc = 1.0 * joint_lenval_matches / total_samples
    joint_gate_acc = 1.0 * joint_gate_matches / total_samples
    with open(
            args['path'] + '/eval_{}_epoch{}_ptest{}-{}.csv'.format(
                args['test_split'], args['eval_epoch'], args['p_test'],
                args['p_test_fertility']), 'a') as f:
        f.write("{},{},{},{},{},{},{},{}".format(
            joint_gate_acc, joint_lenval_acc, joint_acc_score, turn_acc_score,
            F1_score, oracle_joint_acc, oracle_acc, oracle_f1))
    print("Joint Gate Acc {}".format(joint_gate_acc))
    print("Joint Lenval Acc {}".format(joint_lenval_acc))
    print("Joint Acc {} Slot Acc {} F1 {}".format(joint_acc_score,
                                                  turn_acc_score, F1_score))
    print("Oracle Joint Acc {} Slot Acc {} F1 {}".format(
        oracle_joint_acc, oracle_f1, oracle_acc))
    json.dump(predictions,
              open(
                  args['path'] +
                  '/predictions_{}_epoch{}_ptest{}-{}.json'.format(
                      args['test_split'], args['eval_epoch'], args['p_test'],
                      args['p_test_fertility']), 'w'),
              indent=4)
    json.dump(oracle_predictions,
              open(
                  args['path'] +
                  '/oracle_predictions_{}_epoch{}_ptest{}-{}.json'.format(
                      args['test_split'], args['eval_epoch'], args['p_test'],
                      args['p_test_fertility']), 'w'),
              indent=4)
def test_basic():
    expected = 1
    actual = predict(3)
    assert actual == expected
Example #25
from model.predict import predict
import pyttsx3
import os
import spotify
engine = pyttsx3.init()

en_voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
engine.setProperty('voice', en_voice_id)
engine.setProperty('volume', 1.0)  # pyttsx3 volume range is 0.0-1.0
spotify.login()


def say(text):
    engine.say(text)
    print(text)
    engine.runAndWait()


os.system("cls")
while True:
    msg = input(":")
    if msg == "quit":
        spotify.quit()
        break

    say(predict(msg))
def run_epoch(ep,
              total_loss,
              state_out,
              train_op,
              global_step,
              train_summaries,
              losses,
              nb_tokens,
              sess,
              src_lang,
              num_batches,
              summary_writer,
              args,
              domain_lang=None,
              slot_lang=None,
              evaluation_variable=None,
              evaluator=None,
              is_eval=False):

    avg_lenval_loss = 0
    avg_gate_loss = 0
    avg_state_loss = 0

    epoch_lenval_loss = 0
    epoch_gate_loss = 0
    epoch_state_loss = 0

    avg_slot_nb_tokens = 0
    avg_state_nb_tokens = 0
    avg_gate_nb_tokens = 0

    epoch_slot_nb_tokens = 0
    epoch_state_nb_tokens = 0
    epoch_gate_nb_tokens = 0

    epoch_joint_lenval_matches = 0
    epoch_joint_gate_matches = 0
    total_samples = 0

    if args['pointer_decoder']:
        predict_lang = src_lang

    if is_eval:
        predictions = {}

    for i in tqdm(range(0, num_batches)):
        _, _gs, _summary, _losses, _nb_tokens, _state_out, _evaluation_variable = sess.run(
            [
                train_op, global_step, train_summaries, losses, nb_tokens,
                state_out, evaluation_variable
            ])
        summary_writer.add_summary(_summary, _gs)

        if is_eval:
            matches, predictions = predict(_state_out, _evaluation_variable,
                                           predict_lang, domain_lang,
                                           slot_lang, predictions, True,
                                           src_lang, args)
            epoch_joint_lenval_matches += matches['joint_lenval']
            epoch_joint_gate_matches += matches['joint_gate']
            total_samples += len(_evaluation_variable['turn_id'])

        avg_lenval_loss += _losses['lenval_loss']
        avg_gate_loss += _losses['gate_loss']
        avg_state_loss += _losses['state_loss']

        avg_gate_nb_tokens += _nb_tokens['gate']
        avg_slot_nb_tokens += _nb_tokens['slot']
        avg_state_nb_tokens += _nb_tokens['state']

        epoch_slot_nb_tokens += _nb_tokens['slot']
        epoch_state_nb_tokens += _nb_tokens['state']
        epoch_gate_nb_tokens += _nb_tokens['gate']

        epoch_lenval_loss += _losses['lenval_loss']
        epoch_state_loss += _losses['state_loss']
        epoch_gate_loss += _losses['gate_loss']

        if (i + 1) % args['reportp'] == 0 and not is_eval:
            avg_lenval_loss /= avg_slot_nb_tokens
            avg_state_loss /= avg_state_nb_tokens
            avg_gate_loss /= avg_gate_nb_tokens
            print(
                "Step {} gate loss {:.4f} lenval loss {:.4f} state loss {:.4f}"
                .format(i + 1, avg_gate_loss, avg_lenval_loss, avg_state_loss))
            with open(args['path'] + '/train_log.csv', 'a') as f:
                f.write('{},{},{},{},{}\n'.format(ep + 1, i + 1, avg_gate_loss,
                                                  avg_lenval_loss,
                                                  avg_state_loss))
            avg_lenval_loss = 0
            avg_slot_nb_tokens = 0
            avg_state_loss = 0
            avg_state_nb_tokens = 0
            avg_gate_loss = 0
            avg_gate_nb_tokens = 0

    epoch_lenval_loss /= epoch_slot_nb_tokens
    epoch_state_loss /= epoch_state_nb_tokens
    epoch_gate_loss /= epoch_gate_nb_tokens
    joint_gate_acc, joint_lenval_acc, joint_acc_score, F1_score, turn_acc_score = 0, 0, 0, 0, 0

    real_joint_acc_score = 0.0

    if is_eval:
        joint_lenval_acc = 1.0 * epoch_joint_lenval_matches / total_samples
        joint_gate_acc = 1.0 * epoch_joint_gate_matches / total_samples
        # Compute joint accuracy, turn accuracy, and F1 scores
        joint_acc_score, F1_score, turn_acc_score = evaluator.evaluate_metrics(
            predictions, 'dev')

    print(
        "Epoch {} gate loss {:.4f} lenval loss {:.4f} state loss {:.4f} \n joint_gate acc {:.4f} joint_lenval acc {:.4f} joint acc {:.4f} f1 {:.4f} turn acc {:.4f}"
        .format(ep + 1, epoch_gate_loss, epoch_lenval_loss, epoch_state_loss,
                joint_gate_acc, joint_lenval_acc, joint_acc_score, F1_score,
                turn_acc_score))
    print(args['path'])
    with open(args['path'] + '/val_log.csv', 'a') as f:
        if is_eval:
            split = 'dev'
        else:
            split = 'train'
        f.write('{},{},{},{},{},{},{},{},{},{}\n'.format(
            ep + 1, split, epoch_gate_loss, epoch_lenval_loss,
            epoch_state_loss, joint_gate_acc, joint_lenval_acc,
            joint_acc_score, F1_score, turn_acc_score))
    if is_eval:
        return (epoch_gate_loss + epoch_lenval_loss +
                epoch_state_loss) / 3, (joint_gate_acc + joint_lenval_acc +
                                        joint_acc_score) / 3, joint_acc_score
    else:
        return _gs
Example #27
def benchmark_ui(model):
    sg.theme(cfg.UI.THEME)
    layout = [[sg.Text('Ground truth annotation Path:')],
              [
                  sg.Input(key='-INPUT-PATH-', enable_events=True),
                  sg.FileBrowse(initial_folder='./',
                                file_types=(('Annotation file',
                                             'annotation.txt'), ('All files',
                                                                 '*.*')))
              ],
              [
                  sg.Text('Confidence Threshold'),
                  sg.Slider((0, 1),
                            0.4,
                            0.05,
                            orientation='h',
                            key='-CONF-SLIDER-'),
                  sg.Text('NMS IOU Threshold'),
                  sg.Slider((0, 1),
                            0.45,
                            0.05,
                            orientation='h',
                            key='-NMS-SLIDER-')
              ], [sg.B('Run Benchmark'), sg.B('Exit')]]
    window = Window('Benchmark', layout, font=(cfg.UI.FONT, 12))
    while True:
        event, value = window.read()

        if event in ['Exit', None]:
            break
        # Start detection
        if event in ['Run Benchmark']:
            anno_path = value['-INPUT-PATH-']
            nms_iou = value['-NMS-SLIDER-']
            conf_thresh = value['-CONF-SLIDER-']
            anno_file = None
            tp = tn = fp = fn = 0
            try:
                # Open and parse annotation file
                anno_file = open(anno_path, 'r')
                dir_path = os.path.dirname(anno_path)
                annotations = anno_file.readlines()
                anno_data = []
                for anno in annotations:
                    anno = anno.strip()
                    bboxes, img_path = util.parse_annotation(anno)
                    img_path = os.path.join(dir_path, img_path)
                    anno_data.append([img_path, bboxes])
                if len(anno_data) == 0:
                    raise IOError()
                max_progress = len(anno_data)
                cur_progress = 0
                window.disable()
                sg.one_line_progress_meter('Calculating Benchmark',
                                           cur_progress,
                                           max_progress,
                                           key='-PROGRESS-')
                # Start to predict and assess:
                for entry in anno_data:
                    img_RGB = io.imread(entry[0])
                    cur_tp = cur_tn = cur_fp = cur_fn = 0
                    prediction, _ = predict(img_RGB, model, nms_iou,
                                            conf_thresh)
                    target_bboxes = entry[1]
                    hit_bboxes = []
                    if len(prediction) == 0 and len(target_bboxes) == 0:
                        cur_tn += 1
                    for bbox in prediction:
                        coor = np.array(bbox[:4], dtype=np.int32)
                        centroid = ((coor[0] + coor[2]) / 2,
                                    (coor[1] + coor[3]) / 2)
                        hit = False
                        for t_bbox in target_bboxes:
                            if (t_bbox[0] <= centroid[0] <= t_bbox[2]) and (
                                    t_bbox[1] <= centroid[1] <= t_bbox[3]):
                                cur_tp += 1
                                hit = True
                                target_bboxes.remove(t_bbox)
                                hit_bboxes.append(t_bbox)
                                break
                        if not hit:
                            for t_bbox in hit_bboxes:
                                if (t_bbox[0] <= centroid[0] <= t_bbox[2]
                                    ) and (t_bbox[1] <= centroid[1] <=
                                           t_bbox[3]):
                                    hit = True
                                    break
                        if not hit:
                            cur_fp += 1
                    cur_fn += len(target_bboxes)
                    tp += cur_tp
                    tn += cur_tn
                    fp += cur_fp
                    fn += cur_fn
                    cur_progress += 1
                    # Update progress bar
                    if not sg.one_line_progress_meter('Calculating Benchmark',
                                                      cur_progress,
                                                      max_progress,
                                                      key='-PROGRESS-'):
                        break

                if sg.popup_ok(
                        "Detection finished, press 'OK' to see the result",
                        title="Finish") is not None:
                    fig, axs = plt.subplots(1, 2)
                    axs[0].pie((tp, tn, fp, fn),
                               labels=["TP", "TN", "FP", "FN"],
                               autopct='%1.1f%%')
                    axs[0].axis('equal')
                    pre = tp / (tp + fp) if tp + fp != 0 else 0
                    rec = tp / (tp + fn) if tp + fn != 0 else 0
                    spe = tn / (fp + tn) if fp + tn != 0 else 0
                    # Guard against zero denominators when nothing is detected
                    f1 = (2 * pre * rec) / (pre + rec) if pre + rec else 0
                    # F2 is the F-beta score with beta=2 (recall-weighted)
                    f2 = (5 * pre * rec) / (4 * pre + rec) if 4 * pre + rec else 0
                    rects = axs[1].bar(('Prec', 'Rec', 'Spec', 'F1', 'F2'),
                                       (pre, rec, spe, f1, f2))
                    for rect in rects:
                        height = rect.get_height()
                        axs[1].annotate('{:.2f}%'.format(height * 100),
                                        xy=(rect.get_x() +
                                            rect.get_width() / 2, height),
                                        xytext=(0, 3),
                                        textcoords="offset points",
                                        ha='center',
                                        va='bottom')
                    fig.suptitle('Benchmarks')
                    benchmark_img = os.path.join(dir_path, 'benchmark.png')
                    plt.savefig(benchmark_img)
                    # sg.popup_animated(benchmark_img, no_titlebar=False)
                    Window("Benchmark",
                           [[sg.Image(benchmark_img)]]).read(close=True)

            except IOError as e:
                sg.one_line_progress_meter('Calculating Benchmark',
                                           1,
                                           1,
                                           key='-PROGRESS-')
                sg.PopupError("Invalid input file,\n{}".format(e))
            finally:
                if anno_file is not None:
                    anno_file.close()
                window.enable()

    window.close()
    del window
    del layout