Example No. 1
    def to_world(self, *cords):
        """takes a screen point (x,y) and translates it
        to world coordinates, i.e. according to axis
        plot.to_world(x, y)   -> (x , y)
        plot.to_world((x, y)) -> (x , y)
        """
        x, y = tools.cordinp(*cords)

        x = tools.scale(x, 0, opts.sw, self.lo_x, self.hi_x)
        y = tools.scale(y, 0, opts.sh, self.hi_y, self.lo_y)  # invert y axis

        return np.array((x, y))
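
Both calls above rely on tools.scale to map a value linearly from one range onto another (here, from screen pixels onto the axis limits). The helper itself is not shown in this example, so the following is only a minimal sketch under that assumption; note how passing the destination range as (hi_y, lo_y) is what inverts the y axis.

def scale(value, src_lo, src_hi, dst_lo, dst_hi):
    # map value linearly from [src_lo, src_hi] onto [dst_lo, dst_hi]
    t = (value - src_lo) / (src_hi - src_lo)
    return dst_lo + t * (dst_hi - dst_lo)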
Example No. 2
    def to_screen(self, *cords):
        """takes a world point (x,y) and translates it
        to screen coordinates, i.e. according to axis
        plot.to_screen(x, y)   -> (x, y)
        plot.to_screen((x, y)) -> (x, y)
        """
        x, y = tools.cordinp(*cords)

        x = tools.scale(x, self.lo_x, self.hi_x, opts.bw, opts.sw - opts.bw)
        y = tools.scale(y, self.hi_y, self.lo_y, opts.bw,
                        opts.sh - opts.bw)  # invert y axis

        try:
            return np.array((int(x), int(y)))
        except OverflowError:
            # int() raises OverflowError for an infinite y; fall back to a large sentinel
            return np.array((int(x), 1E10))
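
Both methods accept either two scalars or a single (x, y) pair, as their docstrings show, so tools.cordinp presumably just normalizes the two calling conventions. A minimal sketch under that assumption (the real helper is not shown here):

def cordinp(*cords):
    # accept cordinp(x, y) as well as cordinp((x, y)) and return (x, y)
    if len(cords) == 1:
        x, y = cords[0]
    else:
        x, y = cords
    return x, y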
Example No. 3
def main():
    # load the images, their flattened feature vectors and the labels
    images, data, target = tools.load_data()

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=0.2,
                                                        random_state=10)
    # scale the features, then project them onto 40 principal components
    scaler, scaled_X_train = tools.scale(X_train)
    pca, reduced_X_train = tools.reduce(scaled_X_train, 40)
    # train the neural-network classifier on the reduced training set
    clf = tools.ANN(reduced_X_train, y_train)

    # slide the classifier over each detection image and plot the predicted windows
    img1 = plt.imread('data/detection-images/detection-1.jpg')
    preds = tools.sliding_window(clf, img1, scaler, pca)
    tools.plot_prediction_windows(img1, preds, (10, 10))

    img2 = plt.imread('data/detection-images/detection-2.jpg')
    preds = tools.sliding_window(clf, img2, scaler, pca)
    tools.plot_prediction_windows(img2, preds, (15, 10))
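
Note that tools.scale has a different signature here than in the plotting examples above: it takes only the training matrix and returns a fitted scaler together with the scaled data, so the same scaler can be reused on unseen data (e.g. inside tools.sliding_window). A minimal sketch, assuming it wraps scikit-learn's StandardScaler; the real implementation is not shown:

from sklearn.preprocessing import StandardScaler

def scale(X_train):
    # fit the scaler on the training data and return it along with the transformed data
    scaler = StandardScaler()
    return scaler, scaler.fit_transform(X_train)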
Example No. 4
class MRlmatch(MRJob):
    """ A  map-reduce job that calculates the ramp factor """

    DUMP = False
    SIZE = 64
    VECTOR = True
    #MATCH = tools.rnormalize(tools.scale(tools.sin2wave(SIZE), 60, -60), -60, 0)
    MATCH = tools.rnormalize(tools.scale(tools.sinwave(SIZE), 60, -60), -60, 0)

    def mapper(self, _, line):
        """ The mapper loads a track and yields its ramp factor """
        t = track.load_track(line)
        segments = t['segments']
        duration = t['duration']
        xdata = []
        ydata = []
        for seg in segments:
            sloudness = seg['loudness_max']
            sstart = seg['start'] + seg['loudness_max_time']
            xdata.append(sstart)
            ydata.append(sloudness)

        if duration > 20:
            idata = tools.interpolate(xdata, ydata, int(duration) * 10)
            smooth = tools.smooth(idata, 20)
            samp = tools.sample(smooth, self.SIZE)
            ndata = tools.rnormalize(samp, -60, 0)
            if self.DUMP:
                for i, (x, y) in enumerate(zip(self.MATCH, ndata)):
                    print i, x, y
            if self.VECTOR:
                yield (t['artist_name'], t['title'], t['track_id']), ndata
            else:
                distance = tools.distance(self.MATCH, ndata)
                yield (t['artist_name'], t['title'], t['track_id']), distance
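
When VECTOR is disabled, the mapper reduces each track to a single score: the distance between its normalized loudness curve and the MATCH template. tools.distance is not shown here; a minimal sketch under the assumption that it is a plain Euclidean distance between two equal-length vectors:

import math

def distance(a, b):
    # Euclidean distance between two equal-length sequences
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))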
Example No. 5
def main():
    images, data, target = tools.load_data()
    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=0.2,
                                                        random_state=10)

    scaler, scaled_X_train = tools.scale(X_train)
    pca, reduced_X_train = tools.reduce(scaled_X_train, 40)

    params = {
        'hidden_layer_sizes': [pow(10, x) for x in range(4)],
        'alpha': [0.01, 0.001, 0.0001, 0.00001]
    }

    cv = GridSearchCV(param_grid=params,
                      estimator=MLPClassifier(random_state=10),
                      cv=10,
                      n_jobs=-1)
    cv.fit(reduced_X_train, y_train)

    print("Train accuracy:", cv.score(reduced_X_train, y_train))

    scaled_X_test = scaler.transform(X_test)
    reduced_X_test = pca.transform(scaled_X_test)

    print("Test accuracy:", cv.score(reduced_X_test, y_test))

    print(cv.best_params_)

    clf = MLPClassifier(hidden_layer_sizes=1000, alpha=0.0001, max_iter=1000)
    clf.fit(reduced_X_train, y_train)
    print("Train accuracy:", clf.score(reduced_X_train, y_train))
    print("Test accuracy:", clf.score(reduced_X_test, y_test))

    bad_images = []
    for i in range(len(X_test)):
        if clf.predict([reduced_X_test[i]])[0] != y_test[i]:
            bad_images.append(i)
    print("Wrongly classified images:", bad_images)

    X_test = [np.reshape(X_test[i], [20, 20]) for i in range(len(X_test))]
    fig, axs = plt.subplots(2, 3, figsize=(8, 6))
    example_images = [0, 101, 300, 619, 1000, 1212]

    for ax, img in zip(axs.flat, example_images):
        pred = clf.predict([reduced_X_test[img]])
        ax.set_title('Prediction: ' + pred[0].capitalize())
        ax.imshow(X_test[img])

    plt.show()

    tools.plot_confusion_matrix(y_test, clf.predict(reduced_X_test),
                                clf.classes_, "Confusion matrix")
Example No. 6
            MountainCarPrediction.get_return((position, velocity))

# create the plot
sns.set_style('ticks')
fig, ax = plt.subplots(dpi=300, figsize=(6, 4))
sns.heatmap(return_grid,
            vmax=0,
            cmap='cividis',
            square=True,
            ax=ax)
for _, spine in ax.spines.items():
    spine.set_visible(True)
# give the colorbar outline the same linewidth as the heatmap spines
fig.axes[1].artists[0]._linewidth = list(ax.spines.values())[0]._linewidth
ax.set_xlabel('Position', labelpad=5)
plt.xticks(scale(np.array([-1.0, -0.5, 0.0, 0.5]),
                 MountainCar.MIN_POSITION,
                 MountainCar.MAX_POSITION,
                 0,
                 24),
           labels=[-1.0, -0.5, 0.0, 0.5],
           rotation='horizontal')
ax.set_ylabel('Velocity', labelpad=5)
plt.yticks(scale(np.array([-0.06, -0.03, 0.0, 0.03, 0.06]),
                 MountainCar.MIN_VELOCITY,
                 MountainCar.MAX_VELOCITY,
                 0,
                 24),
           labels=[-0.06, -0.03, 0.0, 0.03, 0.06],
           rotation='horizontal')
fig.savefig('state_values.pdf', bbox_inches='tight')
Example No. 7
def dump():
    data = tools.rnormalize(tools.scale(tools.sin2wave(256), 60, -60), -60, 0)
    for d in data:
        print d