Code Example #1
File: anim_generator.py Project: chenzongxiong/fen
def simulation():
    # fname = 'new-dataset/models/diff_weights/method-sin/activation-None/state-0/markov_chain/mu-0/sigma-110/units-10000/nb_plays-20/points-1000/input_dim-1/mu-0-sigma-110-points-1000.csv'
    # fname = 'new-dataset/models/diff_weights/method-sin/activation-None/state-0/markov_chain/mu-0/sigma-110/units-20/nb_plays-20/points-1000/input_dim-1/predictions-mu-0-sigma-110-points-1000/activation#-elu/state#-0/units#-100/nb_plays#-100/ensemble-6/loss-mle/predictions-batch_size-1500-epochs-16000-debug.csv'
    # fname = './new-dataset/models/diff_weights/method-sin/activation-None/state-0/markov_chain/mu-0/sigma-110/units-20/nb_plays-20/points-1000/input_dim-1/predictions-mu-0-sigma-110-points-1000/activation#-elu/state#-0/units#-100/nb_plays#-100/ensemble-11/loss-mle/predictions-batch_size-1500-debug.csv'
    fname = './new-dataset/models/diff_weights/method-sin/activation-None/state-0/markov_chain/mu-0/sigma-110/units-20/nb_plays-20/points-1000/input_dim-1/predictions-mu-0-sigma-110-points-1000/activation#-elu/state#-0/units#-100/nb_plays#-100/ensemble-11/loss-mle/predictions-batch_size-1500-debug-4.csv'
    inputs, outputs = tdata.DatasetLoader.load_data(fname)
    # points = 2000
    # inputs, outputs = inputs[:points], outputs[:points]

    # interp = 1
    # t = np.linspace(1, points, points)
    # f = interp1d(t, inputs, kind='cubic')
    # t_interp = np.linspace(1, points, (int)(interp*points-interp+1))
    # inputs_interp = f(t_interp)

    # import matplotlib.pyplot as plt
    # length = 50
    # plt.plot(t[:length], inputs[:length], 'o')
    # plt.plot(t_interp[:interp*length-1], inputs_interp[:(interp*length-1)], '-x')
    # plt.show()

    # plt.plot(t_[:length], ground_truth[:length], 'o')
    # plt.plot(t_interp[:interp*length-1], ground_truth_interp[:(interp*length-1)], '-x')
    # plt.show()

    colors = utils.generate_colors()
    # fname = '/Users/zxchen/Desktop/debug-1.gif'
    fname = './debug-4.gif'
    utils.save_animation(inputs,
                         outputs,
                         fname,
                         step=100,
                         colors=colors,
                         mode="snake")
Code Example #2
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    out_scores, out_boxes, out_classes = sess.run(yolo_eval(yolo_outputs, image_shape),
                                                  feed_dict={yolo_model.input:image_data,
                                                             K.learning_phase():0})

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save(os.path.join("out", image_file), quality=90)
    output_image = scipy.misc.imread(os.path.join("out", image_file))  # note: scipy.misc.imread was removed in SciPy >= 1.2; imageio.imread is the modern replacement
    imshow(output_image)

    return out_scores, out_boxes, out_classes
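
Most snippets on this page call generate_colors without showing its body. Code Example #7 originally inlined the same HSV logic, so the following is a plausible reconstruction of the helper; treat it as a sketch, not the verified source of any one project.

# Sketch of generate_colors, reconstructed from the HSV recipe that
# Code Example #7 inlines; not the verified source of any project here.
import colorsys
import numpy as np

def generate_colors(class_names):
    """Return one visually distinct (R, G, B) tuple in 0-255 per class."""
    n = len(class_names)
    hsv_tuples = [(i / n, 1.0, 1.0) for i in range(n)]  # evenly spaced hues
    colors = [colorsys.hsv_to_rgb(*hsv) for hsv in hsv_tuples]
    colors = [tuple(int(c * 255) for c in rgb) for rgb in colors]
    np.random.seed(10101)      # fixed seed: same colors on every run
    np.random.shuffle(colors)  # shuffle so adjacent class ids differ in hue
    np.random.seed(None)       # restore default seeding
    return colors

Note that Examples #6, #7, and #13 pass a class count rather than a list, and Example #11 expects one integer color code per entry, so each project evidently ships its own variant of this helper.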
Code Example #3
    def depth_non_cum_figure(cls, df, scale):
        data = df.pivot(index='tradePx', columns='datetime', values='tradeSz')
        x = df['datetime'].drop_duplicates()
        colorscale = generate_colors(scale)
        best_df = df.drop_duplicates('datetime')
        bid, ask = best_df['bidPx'], best_df['askPx']

        traces = [
            go.Heatmap(z=data.values,
                       x=x,
                       y=data.index,
                       hovertemplate=HOVER_TEMPLATES['depth_figure'],
                       colorscale=colorscale),
            go.Scatter(x=x,
                       y=bid,
                       name='Bid',
                       mode='lines',
                       line_color='green',
                       hovertemplate=HOVER_TEMPLATES['line'],
                       line_width=2),
            go.Scatter(x=x,
                       y=ask,
                       name='Ask',
                       mode='lines',
                       line_color='red',
                       hovertemplate=HOVER_TEMPLATES['line'],
                       line_width=2)
        ]

        layout = dict(title_text="Volumes per price")
        x_axes = dict(showspikes=True, spikemode="across")
        return traces, layout, x_axes
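
A hypothetical caller for the classmethod above, assuming Plotly's graph_objects API; DepthCharts is an assumed name for the enclosing class, and df / scale are prepared by the surrounding application.

# Hypothetical usage sketch; DepthCharts, df, and scale are assumptions.
import plotly.graph_objects as go

traces, layout, x_axes = DepthCharts.depth_non_cum_figure(df, scale)
fig = go.Figure(data=traces, layout=layout)  # layout dict is accepted as-is
fig.update_xaxes(**x_axes)                   # spike lines across subplots
fig.show()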
Code Example #4
File: anim_generator.py Project: chenzongxiong/fen
def operator_generator_with_noise():
    mu = 0
    sigma = 0.1
    points = 5000
    loss_name = 'mse'
    for method in methods:
        for weight in weights:
            for width in widths:
                LOG.debug(
                    "Processing method: {}, weight: {}, width: {}, points: {}".
                    format(method, weight, width, points))
                fname = constants.FNAME_FORMAT["operators_noise"].format(
                    method=method,
                    weight=weight,
                    width=width,
                    mu=mu,
                    sigma=sigma,
                    points=points)
                inputs, ground_truth = tdata.DatasetLoader.load_data(fname)
                fname = constants.FNAME_FORMAT[
                    "operators_noise_predictions"].format(method=method,
                                                          weight=weight,
                                                          width=width,
                                                          mu=mu,
                                                          sigma=sigma,
                                                          points=points,
                                                          loss=loss_name)
                _, predictions = tdata.DatasetLoader.load_data(fname)
                inputs = np.vstack([inputs, inputs]).T
                outputs = np.vstack([ground_truth, predictions]).T
                colors = utils.generate_colors(outputs.shape[-1])
                fname = constants.FNAME_FORMAT["operators_noise_gif"].format(
                    method=method,
                    weight=weight,
                    width=width,
                    sigma=sigma,
                    mu=mu,
                    points=points,
                    loss=loss_name)
                utils.save_animation(inputs,
                                     outputs,
                                     fname,
                                     step=40,
                                     colors=colors)
                fname = constants.FNAME_FORMAT[
                    "operators_noise_gif_snake"].format(method=method,
                                                        weight=weight,
                                                        width=width,
                                                        mu=mu,
                                                        sigma=sigma,
                                                        points=points,
                                                        loss=loss_name)
                utils.save_animation(inputs,
                                     outputs,
                                     fname,
                                     step=40,
                                     colors=colors,
                                     mode="snake")
Code Example #5
File: yolo.py Project: pskrunner14/yolo-detector
 def __init__(self,
              model_path=None,
              anchors_path=None,
              classes_path=None,
              dims=None):
     if (model_path is None or anchors_path is None
             or classes_path is None or dims is None or len(dims) != 2):
         raise ValueError('Arguments do not match the specification.')
     self._model = keras.models.load_model(model_path, compile=False)
     self._anchors = read_anchors(anchors_path)
     self._class_names = read_classes(classes_path)
     self._dims = dims
     self._image_shape = list(reversed([int(x) for x in dims]))
     self._model_input_dims = (608, 608)
     self._colors = generate_colors(self._class_names)
     self._sess = K.get_session()
     self._construct_graph()
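
A minimal usage sketch of the constructor above; every path and the dims value below are placeholders, not files shipped with the project.

# Hypothetical instantiation; all paths are placeholders.
yolo = YOLO(model_path='model_data/yolo.h5',
            anchors_path='model_data/yolo_anchors.txt',
            classes_path='model_data/coco_classes.txt',
            dims=(1280, 720))  # source image width and height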
Code Example #6
def detect_img(model):

    class_num = 0
    classes_path = os.path.expanduser(FLAGS.classes_path)
    with open(classes_path) as f:
        class_num = len(f.readlines())
    colors = generate_colors(class_num)

    while True:
        img = input('Input image filename (blank line to quit): ')
        if not img:
            break
        try:
            image = Image.open(img)
        except OSError:  # missing file or unreadable image
            print('Open Error! Try again!')
            continue
        else:
            result = model.detect_image(image)
            objects = result['objects']
            r_image = test_image.make_r_image(image, objects, colors)
            r_image.show()
    model.close_session()
Code Example #7
def detect_video(model, video_path, output_path=""):
    import cv2

    # Generate colors for drawing bounding boxes.
    class_num = 0
    classes_path = os.path.expanduser(FLAGS.classes_path)
    with open(classes_path) as f:
        class_num = len(f.readlines())
    colors = generate_colors(class_num)

    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC),
              type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:  # end of stream or read failure
            break
        image = Image.fromarray(frame)
        result = model.detect_image(image)
        objects = result['objects']
        r_image = make_r_image(image, objects, colors)
        result = np.asarray(r_image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result,
                    text=fps,
                    org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50,
                    color=(255, 0, 0),
                    thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    model.close_session()
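
A hypothetical invocation of detect_video; the model object and FLAGS come from the surrounding script, and both file names below are placeholders.

# Hypothetical call; both paths are placeholders. An empty output_path
# skips the cv2.VideoWriter and only displays the annotated frames.
detect_video(model, video_path='input.mp4', output_path='result.avi')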
Code Example #8
File: anim_generator.py Project: chenzongxiong/fen
def model_generator_with_noise():
    mu = 0
    sigma = 0.01
    points = 5000
    nb_plays = [40]

    units = 20

    loss_name = 'mse'

    for method in methods:
        for weight in weights:
            for width in widths:
                for _nb_plays in nb_plays:
                    LOG.debug(
                        "Processing method: {}, weight: {}, width: {}, points: {}, units: {}, nb_plays: {}, sigma: {}, mu: {}, loss: {}"
                        .format(method, weight, width, points, units,
                                _nb_plays, sigma, mu, loss_name))
                    fname = constants.FNAME_FORMAT["models_noise"].format(
                        method=method,
                        weight=weight,
                        width=width,
                        nb_plays=_nb_plays,
                        units=units,
                        points=points,
                        mu=mu,
                        sigma=sigma)
                    _inputs, ground_truth = tdata.DatasetLoader.load_data(
                        fname)
                    # for __nb_plays in nb_plays:
                    #     for bz in batch_sizes:
                    __nb_plays = _nb_plays
                    bz = 1
                    # the two `if True:` blocks below keep the indentation
                    # of the commented-out loops above
                    if True:
                        if True:
                            # fname = constants.FNAME_FORMAT["models_predictions"].format(method=method, weight=weight,
                            #                                                             width=width, nb_plays=_nb_plays,
                            #                                                             nb_plays_=__nb_plays,
                            # batch_size=bz)

                            fname = constants.FNAME_FORMAT[
                                "models_noise_predictions"].format(
                                    method=method,
                                    weight=weight,
                                    width=width,
                                    nb_plays=_nb_plays,
                                    nb_plays_=__nb_plays,
                                    batch_size=bz,
                                    units=units,
                                    points=points,
                                    mu=mu,
                                    sigma=sigma,
                                    loss=loss_name)
                            try:
                                _, predictions = tdata.DatasetLoader.load_data(
                                    fname)
                            except FileNotFoundError:
                                continue

                            outputs = np.vstack([ground_truth, predictions]).T
                            colors = utils.generate_colors(outputs.shape[-1])
                            inputs = np.vstack(
                                [_inputs for _ in range(outputs.shape[-1])]).T
                            fname = constants.FNAME_FORMAT[
                                "models_noise_gif"].format(
                                    method=method,
                                    weight=weight,
                                    width=width,
                                    nb_plays=_nb_plays,
                                    nb_plays_=__nb_plays,
                                    batch_size=bz,
                                    units=units,
                                    points=points,
                                    mu=mu,
                                    sigma=sigma,
                                    loss=loss_name)
                            utils.save_animation(inputs,
                                                 outputs,
                                                 fname,
                                                 step=40,
                                                 colors=colors)
                            fname = constants.FNAME_FORMAT[
                                "models_noise_gif_snake"].format(
                                    method=method,
                                    weight=weight,
                                    width=width,
                                    nb_plays=_nb_plays,
                                    nb_plays_=__nb_plays,
                                    batch_size=bz,
                                    units=units,
                                    points=points,
                                    mu=mu,
                                    sigma=sigma,
                                    loss=loss_name)
                            utils.save_animation(inputs,
                                                 outputs,
                                                 fname,
                                                 step=40,
                                                 colors=colors,
                                                 mode="snake")

                            fname = constants.FNAME_FORMAT[
                                "models_noise_ts_outputs_gif"].format(
                                    method=method,
                                    weight=weight,
                                    width=width,
                                    nb_plays=_nb_plays,
                                    nb_plays_=__nb_plays,
                                    batch_size=bz,
                                    units=units,
                                    points=points,
                                    mu=mu,
                                    sigma=sigma,
                                    loss=loss_name)
                            # steps = inputs.shape[-1]
                            _inputs = np.arange(points)
                            inputs = np.vstack(
                                [_inputs for _ in range(outputs.shape[-1])]).T
                            utils.save_animation(inputs,
                                                 outputs,
                                                 fname,
                                                 step=points,
                                                 colors=colors)
Code Example #9
File: anim_generator.py Project: chenzongxiong/fen
def model_nb_plays_generator_with_noise():
    step = 40
    method = 'sin'
    # method = 'noise'

    with_noise = True
    diff_weights = True
    run_test = False
    train_invert = True
    interp = 10
    force_rerun = False

    mu = 0
    sigma = 2
    points = 1000
    input_dim = 1
    # ground truth
    nb_plays = 20
    units = 1
    state = 0
    activation = None
    # activation = 'tanh'
    # predictions
    __nb_plays__ = 20
    __units__ = 1
    __state__ = 0
    __activation__ = None
    # __activation__ = 'tanh'

    loss_name = 'mse'

    if method == 'noise':
        with_noise = True

    if with_noise is False:
        mu = 0
        sigma = 0

    if interp == 1:
        if run_test is False:
            if diff_weights is True:
                base_file_key = 'models_diff_weights'
                predictions_file_key = 'models_diff_weights_predictions'

                models_gif_key = 'models_diff_weights_gif'
                models_snake_gif_key = 'models_diff_weights_snake_gif'
                models_ts_outputs_gif_key = 'models_diff_weights_ts_outputs_gif'
            else:
                base_file_key = 'models'
                predictions_file_key = 'models_predictions'

                models_gif_key = 'models_gif'
                models_snake_gif_key = 'models_snake_gif'
                models_ts_outputs_gif_key = 'models_ts_outputs_gif'
        elif run_test is True:
            if diff_weights is True:
                base_file_key = 'models_diff_weights_test'
                predictions_file_key = 'models_diff_weights_test_predictions'
                models_gif_key = 'models_diff_weights_test_gif'
                models_snake_gif_key = 'models_diff_weights_test_snake_gif'
                models_ts_outputs_gif_key = 'models_diff_weights_test_ts_outputs_gif'
            else:
                raise NotImplementedError
    elif interp != 1:
        if run_test is False:
            if diff_weights is True:
                if train_invert is False:
                    base_file_key = 'models_diff_weights'
                    models_interp_key = 'models_diff_weights_interp'
                    predictions_file_key = 'models_diff_weights_predictions_interp'

                    models_gif_key = 'models_diff_weights_interp_gif'
                    models_snake_gif_key = 'models_diff_weights_snake_interp_gif'
                    models_ts_outputs_gif_key = 'models_diff_weights_ts_outputs_interp_gif'
                elif train_invert is True:
                    base_file_key = 'models_diff_weights_interp'
                    models_interp_key = 'models_diff_weights_invert_interp'
                    predictions_file_key = 'models_diff_weights_invert_interp_predictions'

                    models_gif_key = 'models_diff_weights_invert_interp_gif'
                    models_snake_gif_key = 'models_diff_weights_invert_snake_interp_gif'
                    models_ts_outputs_gif_key = 'models_diff_weights_invert_ts_outputs_interp_gif'
            else:
                # base_interp_key = 'models_interp'
                # predictions_file_key = 'models_predictions_interp'

                # models_gif_key = 'models_interp_gif'
                # models_snake_gif_key = 'models_snake_interp_gif'
                # models_ts_outputs_gif_key = 'models_ts_outputs_interp_gif'
                raise NotImplementedError
        elif run_test is True:
            if diff_weights is True:
                base_file_key = 'models_diff_weights_test'
                models_interp_key = 'models_diff_weights_test_interp'
                predictions_file_key = 'models_diff_weights_test_predictions_interp'
                models_gif_key = 'models_diff_weights_test_interp_gif'
                models_snake_gif_key = 'models_diff_weights_test_snake_interp_gif'
                models_ts_outputs_gif_key = 'models_diff_weights_test_ts_outputs_interp_gif'
            else:
                raise NotImplementedError

    if run_test is True and method == 'sin':
        method = 'mixed'

    fname = constants.DATASET_PATH[base_file_key].format(interp=interp,
                                                         method=method,
                                                         activation=activation,
                                                         state=state,
                                                         mu=mu,
                                                         sigma=sigma,
                                                         units=units,
                                                         nb_plays=nb_plays,
                                                         points=points,
                                                         input_dim=input_dim)

    _inputs, ground_truth = tdata.DatasetLoader.load_data(fname)
    # import ipdb; ipdb.set_trace()  # leftover debugging breakpoint, disabled
    LOG.debug("Load **ground-truth** dataset from file: {}".format(
        coloring.cyan(fname)))

    predicted_fname = constants.DATASET_PATH[predictions_file_key].format(
        interp=interp,
        method=method,
        activation=activation,
        state=state,
        mu=mu,
        sigma=sigma,
        units=units,
        nb_plays=nb_plays,
        points=points,
        input_dim=input_dim,
        __activation__=__activation__,
        __state__=__state__,
        __units__=__units__,
        __nb_plays__=__nb_plays__,
        loss=loss_name)
    if interp == 1:
        try:
            _, predictions = tdata.DatasetLoader.load_data(predicted_fname)
            LOG.debug("Load **predicted** dataset from file: {}".format(
                coloring.cyan(predicted_fname)))
        except FileNotFoundError:
            LOG.warn("GROUND TRUTH and PREDICTIONS are the SAME dataset")
            predictions = ground_truth

    elif interp != 1:
        models_interp_fname = constants.DATASET_PATH[models_interp_key].format(
            interp=interp,
            method=method,
            activation=activation,
            state=state,
            mu=mu,
            sigma=sigma,
            units=units,
            nb_plays=nb_plays,
            points=points,
            input_dim=input_dim,
            __activation__=__activation__,
            __state__=__state__,
            __units__=__units__,
            __nb_plays__=__nb_plays__,
            loss=loss_name)

        if force_rerun is False and os.path.isfile(models_interp_fname):
            LOG.debug("Already interploted...")
            t_interp = np.linspace(1, points,
                                   (int)(interp * points - interp + 1))
            _inputs_interp, ground_truth_interp = tdata.DatasetLoader.load_data(
                models_interp_fname)
            LOG.debug("Load **ground-truth** dataset from file: {}".format(
                coloring.purple(models_interp_fname)))
            try:
                _, predictions_interp = tdata.DatasetLoader.load_data(
                    predicted_fname)
                LOG.debug("Load **predicted** dataset from file: {}".format(
                    coloring.cyan(predicted_fname)))
            except FileNotFoundError:
                LOG.warn("GROUND TRUTH and PREDICTIONS are the SAME dataset")
                predictions_interp = ground_truth_interp

            clip_length = min(predictions_interp.shape[0],
                              _inputs_interp.shape[0])
            t_interp = t_interp[:clip_length]
            _inputs_interp = _inputs_interp[:clip_length]
            ground_truth_interp = ground_truth_interp[:clip_length]
            predictions_interp = predictions_interp[:clip_length]
        else:
            if train_invert is False:
                diff = _inputs[1:] - _inputs[:-1]
                LOG.debug("Max jump between two successive x is {}".format(
                    np.max(np.abs(diff))))

                t_ = np.linspace(1, points, points)

                # f1 = interp1d(t_, _inputs)
                f2 = interp1d(t_, _inputs, kind='cubic')
                t_interp = np.linspace(1, points,
                                       (int)(interp * points - interp + 1))

                # _inputs_interp = np.interp(t_interp, t_, _inputs)  # superseded by the cubic fit below
                _inputs_interp = f2(t_interp)
                clip_length = int((t_interp.shape[0] // input_dim) * input_dim)
                _inputs_interp = _inputs_interp[:clip_length]
                # ground_truth_interp = np.interp(_inputs_interp, _inputs, ground_truth, period=1)
                # predictions_interp = np.interp(_inputs_interp, _inputs, predictions, period=1)
                _, ground_truth_interp = tdata.DatasetGenerator.systhesis_model_generator(
                    inputs=_inputs_interp,
                    nb_plays=nb_plays,
                    points=t_interp.shape[0],
                    units=units,
                    mu=None,
                    sigma=None,
                    input_dim=input_dim,
                    activation=activation,
                    with_noise=None,
                    method=None,
                    diff_weights=diff_weights)
                predictions_interp = ground_truth_interp
                # import matplotlib.pyplot as plt
                # length = 50
                # plt.plot(t_[:length], _inputs[:length], 'o')
                # plt.plot(t_interp[:interp*length-1], _inputs_interp[:(interp*length-1)], '-x')
                # plt.show()

                # plt.plot(t_[:length], ground_truth[:length], 'o')
                # plt.plot(t_interp[:interp*length-1], ground_truth_interp[:(interp*length-1)], '-x')
                # plt.show()

                LOG.debug("Save interploted dataset to file: {}".format(
                    coloring.cyan(models_interp_fname)))
                tdata.DatasetSaver.save_data(_inputs_interp,
                                             ground_truth_interp,
                                             models_interp_fname)
                sys.exit(0)
            elif train_invert is True:
                _inputs_interp, ground_truth_interp = ground_truth, _inputs
                tdata.DatasetSaver.save_data(_inputs_interp,
                                             ground_truth_interp,
                                             models_interp_fname)
                LOG.debug("Save interploted dataset to file: {}".format(
                    coloring.cyan(models_interp_fname)))
                sys.exit(0)

        _inputs = _inputs_interp
        ground_truth = ground_truth_interp
        predictions = predictions_interp

    models_gif_fname = constants.DATASET_PATH[models_gif_key].format(
        interp=interp,
        method=method,
        activation=activation,
        state=state,
        mu=mu,
        sigma=sigma,
        units=units,
        nb_plays=nb_plays,
        points=points,
        input_dim=input_dim,
        __activation__=__activation__,
        __state__=__state__,
        __units__=__units__,
        __nb_plays__=__nb_plays__,
        loss=loss_name)
    models_snake_gif_fname = constants.DATASET_PATH[
        models_snake_gif_key].format(interp=interp,
                                     method=method,
                                     activation=activation,
                                     state=state,
                                     mu=mu,
                                     sigma=sigma,
                                     units=units,
                                     nb_plays=nb_plays,
                                     points=points,
                                     input_dim=input_dim,
                                     __activation__=__activation__,
                                     __state__=__state__,
                                     __units__=__units__,
                                     __nb_plays__=__nb_plays__,
                                     loss=loss_name)
    models_ts_outputs_gif_fname = constants.DATASET_PATH[
        models_ts_outputs_gif_key].format(interp=interp,
                                          method=method,
                                          activation=activation,
                                          state=state,
                                          mu=mu,
                                          sigma=sigma,
                                          units=units,
                                          nb_plays=nb_plays,
                                          points=points,
                                          input_dim=input_dim,
                                          __activation__=__activation__,
                                          __state__=__state__,
                                          __units__=__units__,
                                          __nb_plays__=__nb_plays__,
                                          loss=loss_name)

    LOG.debug("Write outputs vs. inputs {} into file {}".format(
        coloring.red("(sequence mode)"), coloring.cyan(models_gif_fname)))

    outputs = np.vstack([ground_truth, predictions]).T
    colors = utils.generate_colors(outputs.shape[-1])
    inputs = np.vstack([_inputs for _ in range(outputs.shape[-1])]).T

    # utils.save_animation(inputs, outputs, models_gif_fname, step=step, colors=colors)

    ##### SNAKE
    _inputs = np.hstack([_inputs, _inputs])
    ground_truth = np.hstack([ground_truth, ground_truth])
    predictions = np.hstack([predictions, predictions])

    inputs = np.vstack([_inputs for _ in range(outputs.shape[-1])]).T
    outputs_snake = np.vstack([ground_truth, predictions]).T

    LOG.debug("Write outputs vs. inputs {} into file {}".format(
        coloring.red("(snake mode)"), coloring.cyan(models_snake_gif_fname)))
    utils.save_animation(inputs,
                         outputs_snake,
                         models_snake_gif_fname,
                         step=step,
                         colors=colors,
                         mode="snake")

    if interp == 1:
        _inputs = np.arange(points)
    else:
        _inputs = t_interp

    inputs = np.vstack([_inputs for _ in range(outputs.shape[-1])]).T
    # outputs = np.vstack([ground_truth, predictions]).T
    LOG.debug("Write outputs vs. ts into file {}".format(
        coloring.cyan(models_ts_outputs_gif_fname)))
    utils.save_animation(inputs,
                         outputs,
                         models_ts_outputs_gif_fname,
                         step=points,
                         colors=colors)
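
Every fen example above funnels file I/O through tdata.DatasetLoader.load_data, whose body is not shown. A minimal sketch consistent with how it is called here (a CSV yielding an inputs array and an outputs array); the real loader may handle more layouts, such as multi-column prediction files.

# Hypothetical sketch of DatasetLoader.load_data as used above: a
# two-column CSV split into (inputs, outputs). The real loader in
# chenzongxiong/fen may support more shapes.
import numpy as np

class DatasetLoader:
    @staticmethod
    def load_data(fname):
        data = np.loadtxt(fname, delimiter=',')
        return data[:, 0], data[:, 1]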
Code Example #10
image_cp = preprocess_image(image)  # preprocess the image: resize, normalize, and add a batch dimension at axis 0
tf_image = tf.placeholder(tf.float32,[1,input_size[0],input_size[1],3])  # define the input placeholder
model_output = darknet(tf_image)  # network output

output_sizes = input_size[0]//32, input_size[1]//32 # the feature map is the input image downsampled by a factor of 32

# this function returns box coordinates (top-left and bottom-right corners), objectness scores, and class probabilities
output_decoded = decode(model_output=model_output,output_sizes=output_sizes, num_class=len(class_names),anchors=anchors)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())   # initialize TensorFlow global variables
    saver = tf.train.Saver()
    saver.restore(sess, model_path)  # load the model weights into the current session
    bboxes, obj_probs, class_probs = sess.run(output_decoded, feed_dict={tf_image: image_cp})  # returns box coordinates, objectness scores, and class probabilities

bboxes,scores,class_max_index = postprocess(bboxes,obj_probs,class_probs,image_shape=image_shape)   # post-process the candidate boxes: keep those with confidence above 0.5, then apply non-maximum suppression
colors = generate_colors(class_names)
img_detection = draw_detection(image, bboxes, scores, class_max_index, class_names, colors)  # render the detections onto the image

cv2.imwrite(image_detection, img_detection)
cv2.imshow("detection_results", img_detection)  # display the image
cv2.waitKey(0)  # wait for any key press before exiting



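The first comment above describes preprocess_image as resize plus normalization plus a leading batch dimension. A minimal sketch consistent with that description, assuming an OpenCV BGR input; the project's real implementation may differ.

# Sketch of the preprocessing described above (resize, normalize to
# [0, 1], prepend a batch axis); the real preprocess_image may differ.
import cv2
import numpy as np

def preprocess_image(image, input_size=(416, 416)):
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)   # BGR -> RGB
    resized = cv2.resize(image_rgb, input_size).astype(np.float32)
    normalized = resized / 255.0                         # scale to [0, 1]
    return np.expand_dims(normalized, axis=0)            # (1, H, W, 3)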
Code Example #11
def archive_pipeline():
    start_time = time.time()
    path = "./Dataset/refined_data/temp/tags_relationship.csv"
    df = utils.load_csv(path)
    df["Tag1"] = df["Tag1"].apply(lambda x: "_" + str(x))
    df["Tag2"] = df["Tag2"].apply(lambda x: "_" + str(x))
    # print(df.head())

    weights = df["Occurrence"].values.tolist()
    threshold = np.quantile(weights, 0.995)
    min_w, max_w, avg_w = np.min(weights), np.max(weights), np.average(weights)
    print("Min = {}, Max = {}, Avg = {}, Threshold = {}".format(
        min_w, max_w, avg_w, threshold))
    tags1 = df["Tag1"].values.tolist()
    tags2 = df["Tag2"].values.tolist()
    tags = tags1
    tags.extend(tags2)

    vertices = list(set(tags))
    n_vertices = len(vertices)
    # print("Number vertices : ", n_vertices)
    edge_list = list(zip(tags1, tags2))
    # print(edge_list[:3])

    # Build graph
    g = igraph.Graph()
    g.add_vertices(n_vertices)
    g.vs["name"] = vertices
    g.vs["label"] = vertices
    g.vs["style"] = "filled"
    g.add_edges(edge_list)
    g.es["weight"] = weights
    print(g.summary())

    print("\nAfter delete vertices")
    g.vs.select(_degree_le=5).delete()
    print(g.summary())

    # Community detection
    cluster = g.community_multilevel(return_levels=True, weights=weights)[-1]
    print(cluster.summary())
    # print([c.summary() for c in cluster])
    num_communities = len(cluster)
    # print(c.membership)
    labels = cluster.membership
    g.es.select(weight_lt=threshold)["style"] = "invis"
    print("\nAfter delete edges")
    print(g.summary())

    # colors = list(np.linspace(0, 0xFFFFFF, num_communities+1)[1:].astype(int))
    colors = utils.generate_colors(num_communities)
    colors = ["#{:06X}".format(color) for color in colors]
    g.vs["fillcolor"] = [colors[label] for label in labels]

    # Save graph
    save_dot_path = "./Visualize/graph.dot"
    save_pdf_path = "./Visualize/graph.pdf"
    utils.make_parent_dirs(save_dot_path)
    g.write_dot(save_dot_path)
    check_call([
        'sfdp', "-Goverlap=false", '-Tpdf', save_dot_path, '-o', save_pdf_path
    ])

    exec_time = time.time() - start_time
    print("Time : {:.2f} seconds".format(exec_time))
Code Example #12
    # =================

    # print(cluster.summary())
    # print([c.summary() for c in cluster])

    # print(c.membership)

    g.vs["community"] = labels

    # g.es.select(weight_lt=threshold)["style"] = "invis"
    g.es.select(lambda e: is_delete_edge(g, e)).delete()
    print("\nAfter delete edges")
    print(g.summary())

    # colors = list(np.linspace(0, 0xFFFFFF, num_communities+1)[1:].astype(int))
    colors = utils.generate_colors(num_communities)
    colors = ["#{:06X}".format(color) for color in colors]
    g.vs["fillcolor"] = [colors[label] for label in labels]

    # Save graph
    names = "_".join(selected_categories)
    save_dot_path = "./Visualize/graph_{}_{}.dot".format(
        names, num_communities)
    save_pdf_path = "./Visualize/graph_{}_{}.pdf".format(
        names, num_communities)
    utils.make_parent_dirs(save_dot_path)
    g.write_dot(save_dot_path)
    check_call([
        'sfdp', "-Goverlap=false", "-Goutputorder=edgesfirst", '-Tpdf',
        save_dot_path, '-o', save_pdf_path
    ])
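
This fragment filters edges through an is_delete_edge predicate whose body is not shown. One plausible shape, kept deliberately simple; both the rule and the threshold are assumptions.

# Hypothetical predicate; the real is_delete_edge is not shown in this
# fragment. One plausible rule: drop edges whose weight is below a cutoff.
# g is accepted only to match the call site above.
def is_delete_edge(g, e, weight_threshold=5):
    return e["weight"] < weight_threshold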
Code Example #13
def detect_img(model):

    image_glob = FLAGS.image_glob
    test_file = FLAGS.test_file
    print(image_glob)
    print(FLAGS.model_path)
    result_name = os.path.basename(os.path.dirname(FLAGS.model_path))

    image_source = ""
    if image_glob:
        img_path_list = glob.glob(image_glob)
        image_source = image_glob
    else:
        with open(test_file) as f:
            img_path_list = [line.strip().split()[0] for line in f]
        image_source = test_file

    pdir = os.path.join("results", FLAGS.network,
                        os.path.basename(os.path.dirname(image_source)))

    output_dir = utils.get_unused_dir_num(pdir=pdir, pref=result_name)

    image_output_dir = os.path.join(output_dir, "images")
    os.makedirs(image_output_dir, exist_ok=True)

    prediction_output_dir = os.path.join(output_dir, "predictions")
    os.makedirs(prediction_output_dir, exist_ok=True)

    feature_output_dir = os.path.join(output_dir, "feature")
    os.makedirs(feature_output_dir, exist_ok=True)

    crop_output_dir = os.path.join(output_dir, "crop")
    os.makedirs(crop_output_dir, exist_ok=True)

    # Generate colors for drawing bounding boxes.
    class_num = 0
    classes_path = os.path.expanduser(FLAGS.classes_path)
    with open(classes_path) as f:
        class_num = len(f.readlines())
    colors = utils.generate_colors(class_num)

    train_bbox = ""
    train_polygon = ""
    train_json = ""
    for img_path in img_path_list:
        img_basename, _ = os.path.splitext(os.path.basename(img_path))
        try:
            image = Image.open(img_path)
        except OSError:  # missing file or unreadable image
            print('Open Error! Skipping {}'.format(img_path))
            continue
        else:
            result = model.detect_image(image)
            objects = result['objects']
            objects = utils.take_contours(objects)

            # save result image with bounding box
            r_image = utils.make_r_image(image.copy(), objects, colors)
            r_image.save(
                os.path.join(
                    image_output_dir,
                    img_basename + ".jpg",
                ))

            # save feature map of middle layer
            if 'feature' in result:
                feature = result['feature']
                utils.visualize_and_save(
                    feature,
                    os.path.join(feature_output_dir, img_basename + ".png"))
                np.save(
                    os.path.join(feature_output_dir, img_basename + ".npy"),
                    feature)

            train_bbox += img_path
            train_polygon += img_path
            train_json += img_path
            prediction = ""
            json_img_objs_list = []
            for obj in objects:
                # save cropped image
                class_name = obj["class_name"]
                score = obj["score"]
                image_base_name = class_name + "_" + "_".join(
                    [str(s) for s in obj["bbox"]])
                img_crop = image.crop(obj["bbox"])
                img_crop.save(
                    os.path.join(crop_output_dir, image_base_name + ".png"))

                # train_bbox file
                x_min, y_min, x_max, y_max = obj["bbox"]
                coordinates = "{0},{1},{2},{3}".format(x_min, y_min, x_max,
                                                       y_max)
                train_bbox += " {coordinates},{class_id}".format(
                    coordinates=coordinates,
                    class_id=obj["class_id"],
                )
                if 'polygon' in obj:
                    train_polygon += " [{coordinates},{class_id}]".format(
                        coordinates=_list_to_str(obj["polygon"]),
                        class_id=obj["class_id"],
                    )

                json_img_objs = {
                    "bbox": obj["bbox"],
                    "class_id": obj["class_id"],
                    "score": obj["score"],
                }
                if "all_points_x" in obj:
                    json_img_objs["all_points_x"] = obj["all_points_x"]
                    json_img_objs["all_points_y"] = obj["all_points_y"]
                # if "contours" in obj:
                #     json_img_objs["contours"] = [contour.tolist() for contour in obj["contours"]]
                #     json_img_objs["hierarchy"] = [hierarchy.tolist() for hierarchy in obj["hierarchy"]]
                json_img_objs_list.append(json_img_objs)

                # prediction file
                prediction += "{class_name}\t{score}\t{coordinates}\n".format(
                    score=score,
                    class_name=class_name,
                    coordinates="{0}\t{1}\t{2}\t{3}".format(
                        x_min, y_min, x_max, y_max),
                )
            train_bbox += "\n"
            train_polygon += "\n"
            train_json += json.dumps(json_img_objs_list,
                                     cls=NumpyEncoder,
                                     sort_keys=True,
                                     separators=(',', ':'))
            train_json += "\n"
            # save prediction text for each image
            with open(
                    os.path.join(prediction_output_dir, img_basename + ".txt"),
                    "w") as f:
                print(prediction, end="", file=f)

    shutil.copy(os.path.abspath(classes_path),
                os.path.join(output_dir, "classes.txt"))
    # save train_bbox text
    with open(os.path.join(output_dir, "train_bbox.txt"), "w") as f:
        print(train_bbox, end="", file=f)
    # save train_polygon text
    with open(os.path.join(output_dir, "train_polygon.txt"), "w") as f:
        print(train_polygon, end="", file=f)

    with open(os.path.join(output_dir, "train_json.txt"), "w") as f:
        print(train_json, end="", file=f)
    model.close_session()
Code Example #14
File: predict.py Project: wulele2/YOLOv3
anchors = anchors / input_shape[0] # normalize anchors to units of the input image size

class_path = "./data/coco_classes.txt"
class_names = utils.get_classes(class_path)
num_cls = len(class_names)

"""load model"""
weightsfile = './weights/yolov3.weights'
net = YOLO()
net.to(device)
load_weights(net, weightsfile)
net.eval()

with torch.no_grad():
    image = image.to(device)
    feats = net(image)
    boxes_, scores_, classes_ = filter(  # the project's own filter(), not the Python builtin
        feats,
        anchors,
        image_size,
        device,
        num_cls,
        threshold=0.4
    )

boxes = boxes_.cpu().numpy()
scores = scores_.cpu().numpy()
classes = classes_.cpu().numpy()
colors = utils.generate_colors(class_names)
utils.draw_boxes(im, scores, boxes, classes, class_names, colors)
im.save("./data/car_out.jpg")
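
The filter call above condenses score thresholding and non-maximum suppression into one step. A self-contained sketch of that stage using torchvision's nms; the real signature in wulele2/YOLOv3 differs, and the name filter_boxes is a hypothetical stand-in.

# Sketch of the score-threshold + per-class NMS stage that filter()
# performs; filter_boxes is a hypothetical stand-in, not the real API.
import torch
from torchvision.ops import nms

def filter_boxes(boxes, scores, classes, score_threshold=0.4, iou_threshold=0.5):
    keep = scores > score_threshold          # drop low-confidence detections
    boxes, scores, classes = boxes[keep], scores[keep], classes[keep]
    kept = []
    for c in classes.unique():               # suppress overlaps per class
        idx = (classes == c).nonzero(as_tuple=True)[0]
        kept.append(idx[nms(boxes[idx], scores[idx], iou_threshold)])
    if kept:
        kept = torch.cat(kept)
        boxes, scores, classes = boxes[kept], scores[kept], classes[kept]
    return boxes, scores, classes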