Example #1
def visualize_data():
    df = pd.read_csv('sp500_joined_closes.csv')
    df['AAPL'].plot()
    plt.show()
    df_corr = df.corr()
    print(df_corr.head())

    data = df_corr.values
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)

    heatmap = ax.pcolor(data, cmap=plt.cm.RdYlGn)
    fig.colorbar(heatmap)
    ax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)
    ax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)
    ax.invert_yaxis()
    ax.xaxis.tick_top()

    column_labels = df_corr.columns
    row_labels = df_corr.index

    ax.set_xticklabels(column_labels)
    ax.set_yticklabels(row_labels)
    plt.xticks(rotation=90)
    heatmap.set_clim(-1,1)
    plt.tight_layout()
    plt.show()
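
A minimal, self-contained variant of the same heatmap recipe (illustrative only: a small synthetic DataFrame stands in for sp500_joined_closes.csv):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# synthetic daily returns for three hypothetical tickers
rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(250, 3)), columns=['AAA', 'BBB', 'CCC'])
df_corr = df.corr()

fig, ax = plt.subplots()
heatmap = ax.pcolor(df_corr.values, cmap=plt.cm.RdYlGn)
fig.colorbar(heatmap)
# center the ticks on each cell and label them with the column names
ax.set_xticks(np.arange(df_corr.shape[0]) + 0.5, minor=False)
ax.set_yticks(np.arange(df_corr.shape[1]) + 0.5, minor=False)
ax.set_xticklabels(df_corr.columns)
ax.set_yticklabels(df_corr.index)
ax.invert_yaxis()
ax.xaxis.tick_top()
heatmap.set_clim(-1, 1)
plt.tight_layout()
plt.show()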
Example #2
    def __init__(self):
        # create an initial figure handle
        fig = plt.figure()
        # common practice is to name each subplot as 'ax#', but it gets hard to read
        axflux = fig.add_subplot(1, 2, 1)
        axlog = fig.add_subplot(2, 2, 2)
        axdiff = fig.add_subplot(2, 2, 4)

        # these are the bounds for the axes, hardcoded for a 25x25 pixel postage stamp
        self.x = np.arange(0, 25)
        self.y = np.arange(0, 25).reshape(-1, 1)

        ''' Setting up subplots one by one '''
        # 1: pixel data (image) subplot
        # might not actually need any? axes are coords
        axflux.set_title('Calibrated Flux Pixel Cutout')

        # 2: log pixel data (image) subplot
        # same as above; TODO: look into if this is where to put labels/colorbars
        axlog.set_title('Log Flux - relative flux levels?')

        # 3: differenced pixel data (image) subplot
        # see above;
        axdiff.set_title('Difference image - changes')

        # actually call out to the animation engine
        animation.TimedAnimation.__init__(self, fig, interval=25, blit=True)
def numpy_test():
    # starting time
    start_time = timeit.default_timer()
    # the call being timed
    np.arange(10)
    # ending time
    print(timeit.default_timer() - start_time)
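
For a steadier measurement than a single start/stop timestamp, timeit can average the same call over many runs (a small sketch of the same idea as numpy_test above):

import timeit
import numpy as np

# average over 100,000 runs instead of timing a single call
elapsed = timeit.timeit(lambda: np.arange(10), number=100_000)
print(elapsed / 100_000)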
Example #4
def train(args):
    model = SegNet()
    modelcheck = ModelCheckpoint(args['model'],
                                 monitor='val_acc',
                                 save_best_only=True,
                                 mode='max')
    callbacks = [modelcheck, tf.keras.callbacks.TensorBoard(log_dir='.')]
    train_set, val_set = get_train_val()
    train_num, val_num = len(train_set), len(val_set)
    print('the number of train data and val data is {} and {}'.format(
        train_num, val_num))
    H = model.fit(x=generateData(BS, train_set),
                  steps_per_epoch=(train_num // BS),
                  epochs=EPOCHS,
                  verbose=2,
                  validation_data=generateValidData(BS, val_set),
                  validation_steps=(val_num // BS),
                  callbacks=callbacks)

    # plot the training curves
    plt.style.use('ggplot')
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history['loss'], label='train_loss')
    plt.plot(np.arange(0, N), H.history['val_loss'], label='val_loss')
    plt.plot(np.arange(0, N), H.history['acc'], label='acc')
    plt.plot(np.arange(0, N), H.history['val_acc'], label='val_acc')
    plt.title('training loss and accuracy on segnet satellite seg')
    plt.xlabel('epoch')
    plt.ylabel('loss/accuracy')
    plt.legend(loc='lower left')
    plt.savefig(args['plot'])

def generate_database(start, map_file, resolution):
    database = []
    # the i-range counts down from -10 to -33, so the step must be negative
    goals = [(i, j, 0.2) for i in np.arange(-10, -33, -4)
             for j in np.arange(22, -23, -4)]
    for goal in goals:
        start = np.array([-10, 22, 0.2])
        #database=[]
        path = dict()
        rx, ry, rz = runtest(map_file, start, np.asarray(goal), resolution)
        boundary, blocks = load_map(map_file)
        #path['start']=start
        path['goal'] = goal
        path['x'] = rx
        path['y'] = ry
        database.append(path)
        #print(path)
        #my_dict = {'foo': [1,2], 'bar':[3,4]}

        # create list of strings
        #list_of_strings = [ f'{key} : {path[key]}' for key in path ]
        #list_of_strings= ["{}: {}".format(key,path[key]) for key in path]
        # write string one by one adding newline
        #with open('path.txt', 'w') as my_file:
        #[ my_file.write(st+"\n") for st in list_of_strings ]
    with open('path_database.txt', 'w') as fd:
        fd.write(json.dumps(database))
    return
    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)

        layout = QHBoxLayout()

        self.ax = pg.PlotWidget()
        self.ax.showGrid(True, True)

        self.line = pg.InfiniteLine(
            pos=-20,
            pen=pg.mkPen('k', width=3),
            movable=False  # We have our own code to handle dragless moving.
        )

        self.ax.addItem(self.line)
        self.ax.setLabel('left', text='Rate')
        self.p1 = self.ax.getPlotItem()
        self.p1.scene().sigMouseMoved.connect(self.mouse_move_handler)

        # Add the right-hand axis for the market activity
        self.p2 = pg.ViewBox()
        self.p2.enableAutoRange(axis=pg.ViewBox.XYAxes, enable=True)
        self.p1.showAxis('right')
        self.p1.scene().addItem(self.p2)
        self.p2.setXLink(self.p1)
        # grab the right-hand axis item and link it to the second ViewBox
        self.ax2 = self.p1.getAxis('right')
        self.ax2.linkToView(self.p2)
        self.ax2.setGrid(False)
        self.ax2.setLabel(text='Volume')

        self._market_activity = pg.PlotCurveItem(
            np.arange(NUMBER_OF_TIMEPOINTS),
            np.arange(NUMBER_OF_TIMEPOINTS),
            pen=pg.mkPen('k', style=Qt.DashLine, width=1))
        self.p2.addItem(self._market_activity)
Example #7
    def backward_gradient(self, x, y):
        """
        Back-propagate the gradient through the unrolled time steps.

        Args:
            x ([type]): [description]
            y ([type]): [description]
        """
        self.forward(x)
        self.calculate_gradients(x, y)
        dl_du = np.zeros_like(self.U)
        dl_dv = np.zeros_like(self.V)
        dl_dw = np.zeros_like(self.W)
        T = len(self.layers)

        for t in np.arange(T)[::-1]:
            layer = self.layers[t]
            dl_dv += np.outer(layer.dl_dq, layer.dq_dv)  # correct
            delta_t = np.matmul(layer.dl_dq, layer.dq_ds) * layer.dso_dsi

            # TODO: should find why we are doing this
            for i in np.arange(max(0, t - T), t + 1)[::-1]:
                delta_t = np.matmul(
                    delta_t,
                    self.layers[i].dsi_dprev_s) * self.layers[i].dso_dsi
                dl_du += np.outer(self.layers[i].dsi_du, delta_t)
                dl_dw += np.outer(self.layers[i].dsi_dw, delta_t)
        return (dl_du, dl_dw, dl_dv)
Example #8
    def plot_decision_regions(X, y, classifier, resolution=0.02):

        # setup marker generator and color map
        markers = ('s', 'x', 'o', '^', 'v')
        colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
        cmap = ListedColormap(colors[:len(np.unique(y))])

        # plot the decision surface
        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                               np.arange(x2_min, x2_max, resolution))
        Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
        Z = Z.reshape(xx1.shape)
        plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
        plt.xlim(xx1.min(), xx1.max())
        plt.ylim(xx2.min(), xx2.max())

        #plot class samples
        for idx, cl in enumerate(np.unique(y)):
            plt.scatter(x=X[y == cl, 0],
                        y=X[y == cl, 1],
                        alpha=0.8,
                        c=cmap(idx),
                        marker=markers[idx],
                        label=cl)


# >>> plot_decision_regions(X, y, classifier=ppn)
# >>> plt.xlabel('sepal length [cm]')
# >>> plt.ylabel('petal length [cm]')
# >>> plt.legend(loc='upper left')
# >>> plt.show()
    def calculate_total_loss(self, x, y):
        """Calculate the distance between the correct words and our predictions."""
        L = 0
        for i in np.arange(len(y)):
            o, s = self.forward_propagation(x[i])
            correct_word_prediction = o[np.arange(len(y[i])), y[i]]
            L += -1 * np.sum(np.log(correct_word_prediction))
        return L
def decompLU(A):
    U = np.array(A, dtype=float)  # work on a float copy of A
    n = np.shape(U)[0]
    L = np.eye(n)
    for c in np.arange(n - 1):
        for r in np.arange(c + 1, n):
            L[r, c] = U[r, c] / U[c, c]
            for k in np.arange(c + 1, n):
                U[r, k] = U[r, k] - L[r, c] * U[c, k]
            U[r, c] = 0
    return L, U
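
A quick sanity check for decompLU (illustrative; the 3x3 matrix below is arbitrary):

import numpy as np

A = np.array([[4., 3., 2.],
              [6., 3., 1.],
              [8., 5., 9.]])
L, U = decompLU(A)
# L is unit lower triangular, U is upper triangular, and L @ U reconstructs A
print(np.allclose(L @ U, A))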
Example #11
def plot_decision_boundary(pred_func, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Draw the decision regions and the training points
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
Example #12
def plot_training(H, N, plotPath):
    plt.style.use('ggplot')
    plt.figure()
    plt.plot(np.arange(0, N), H.history['loss'], label='train_loss')
    plt.plot(np.arange(0, N), H.history['val_loss'], label='val_loss')
    plt.plot(np.arange(0, N), H.history['accuracy'], label='train_acc')
    plt.plot(np.arange(0, N), H.history['val_accuracy'], label='val_acc')
    plt.title('Training loss and accuracy')
    plt.xlabel('Epoch #')
    plt.ylabel('Loss/Accuracy')
    plt.legend(loc='lower left')
    plt.savefig(plotPath)
def plot_training_history(H, N, plotPath):
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, N), H.history['loss'], label='train_loss')
    plt.plot(np.arange(0, N), H.history['val_loss'], label='val_loss')
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title('Training Loss and accuracy')
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(plotPath)
def make_prediction_grid(predictors, outcomes, limits, h, k):
    (x_min, x_max, y_min, y_max) = limits
    xs = np.arange(x_min, x_max, h)
    ys = np.arange(y_min, y_max, h)
    xx, yy = np.meshgrid(xs, ys)

    prediction_grid = np.zeros(xx.shape, dtype=int)
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            p = np.array([x, y])
            prediction_grid[j, i] = knn_predict(p, predictors, outcomes, k)

    return (xx, yy, prediction_grid)
    def construct_graph_unpaved_environment(self):
        graph = dict()
        x = [x_value for x_value in np.arange(self.xmin, self.xmax, self.res)]
        y = [y_value for y_value in np.arange(self.ymin, self.ymax, self.res)]

        node_index = 0
        for x_value in x:
            for y_value in y:
                graph['%d' % node_index]

        # Consider boundary nodes first

        pass
Example #16
def view_classify(img, ps):
    
    ps = ps.data.numpy().squeeze()
    fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
    ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())
    ax1.axis('off')
    ax2.barh(np.arange(5), ps)
    ax2.set_aspect(0.1)
    ax2.set_yticks(np.arange(5))
    ax2.set_yticklabels(classes)
    ax2.set_title('Class Probability')
    ax2.set_xlim(0, 1.1)
    
    plt.tight_layout()
Example #17
def evaluate_with_dataflow_only(individual, x, y, df_equal,
                                static_dynamic_dict, block_static_dict,
                                pop_index):
    npmask = np.array(individual, dtype=np.uint64)
    # my_seed = 0
    # for i in npmask:
    #     my_seed = my_seed + i
    # random.seed(my_seed)

    pop_number = Config.MU

    sampled_num = 100
    sdd_sample = random.sample(list(static_dynamic_dict.keys()), sampled_num)
    count = 0
    for s in sdd_sample:
        count += len(static_dynamic_dict[s])
    average = count / sampled_num

    # accuracy
    min_accuracy = 0.9900
    max_accuracy = 1

    step = (max_accuracy - min_accuracy) / pop_number
    accuracy_array = np.arange(min_accuracy, max_accuracy, step)
    acc_lower_bound = accuracy_array[pop_index]
    if pop_index == pop_number - 1:
        acc_upper_bound = max_accuracy
    else:
        acc_upper_bound = accuracy_array[pop_index + 1]

    eq_accuracy = random.uniform(acc_lower_bound, acc_upper_bound)

    # class number

    min_num = len(block_static_dict) * average
    max_num = x.shape[0]

    step = (max_num - min_num) / pop_number
    accuracy_array = np.arange(min_accuracy, max_accuracy, step)
    acc_lower_bound = accuracy_array[pop_index]

    # num_eq_class = x.shape[0] - int(df_equal.shape[0] * average * random.uniform(0, 1))
    num_eq_class = 1
    if num_eq_class < 0:
        num_eq_class = 1

    # eq_accuracy = 0.9998 + random.uniform(-0.00019, 0.00019)
    print(num_eq_class, eq_accuracy)
    return num_eq_class, eq_accuracy
def crr(S0, K, T, r, v, M, option='call'):
    dt = T / M
    df = math.exp(-r * dt)  # discount factor per period
    # Binomial parameters
    u = math.exp(v * math.sqrt(dt))
    d = 1 / u
    p = (math.exp(r * dt) - d) / (u - d)  # martingale branch probability
    # Index level init
    mu = np.arange(M + 1)
    mu = np.resize(mu, (M + 1, M + 1))
    md = np.transpose(mu)
    mu = u ** (mu - md)
    md = d ** md
    S = S0 * mu * md
    # Call/Put allocation
    if option == 'call':
        V = np.maximum(S - K, 0)
    else:
        V = np.maximum(K - S, 0)
    z = 0
    for t in range(M - 1, -1, -1):  # backwards iteration
        V[0:M - z, t] = (p * V[0:M - z, t + 1] + (1 - p) * V[1:M - z + 1, t + 1]) * df
        z += 1

    return V[0, 0]
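
Illustrative call with made-up market parameters; the result should sit close to the Black-Scholes value for the same inputs (roughly 10.45 here):

import math
import numpy as np

# at-the-money European call: spot 100, strike 100, 1 year, 5% rate, 20% vol, 500 steps
print(crr(S0=100., K=100., T=1., r=0.05, v=0.2, M=500, option='call'))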
Example #19
def softmax_cross_entropy_with_logits(logits, y):
    logits_y = logits[np.arange(len(logits)), y]  # pick the score of the correct class per row
    xentropy = -logits_y + np.log(np.sum(np.exp(logits), axis=-1))  # cross-entropy via log-sum-exp

    return xentropy
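
A small shape check (the logits below are made up); each row of logits yields one scalar loss:

import numpy as np

logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.3]])
y = np.array([0, 1])  # index of the correct class for each row
print(softmax_cross_entropy_with_logits(logits, y))  # two per-row losses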
Example #20
def get_graphic_path(dictionary, yarray=None, offset=1):
    '''
    Option 1: yarray = None
    combines arrays from dict into x & y multiline arrays
    input: dictionary, each entry - Nx2 array, 2 columns - x and y
    Option 2: returns graphic path
    '''
    # get length of each array (must be the same)
    Npoints = len(dictionary[list(dictionary.keys())[0]])
    # get # of arrays
    names = sorted(dictionary.keys(), key=natural_keys)
    # print names
    Nfiles = len(names)
    # allocate space
    x = np.zeros((Nfiles, Npoints))
    y = np.zeros((Nfiles, Npoints))
    for k in range(Nfiles):
        x[k] = dictionary[names[k]][:, 0]
        y[k] = dictionary[names[k]][:, 1]
    if yarray is None:
        offset_array = np.arange(0, Npoints * offset,
                                 offset).reshape(Npoints, 1)
    else:
        offset_array = yarray.reshape(Npoints, 1)
    y += offset_array
    return x, y
Example #21
def compute_curvature(image, sigma):
    
    #1. constructs the 2D gaussian filter "h" given the window 
    
    winsize = np.ceil(4 * sigma)  # enough space for the filter
    window = np.arange(-winsize, winsize + 1)
    X, Y = np.meshgrid(window, window)
    G = 1.0 / (2 * math.pi * sigma ** 2)
    G *= np.exp(-(X ** 2 + Y ** 2) / (2 * sigma ** 2))
    
    #2. calculates first and second derivatives of "G" with respect to "X"
    
    G1_0 = (-X / (sigma ** 2)) * G
    G2_0 = ((X ** 2) / (sigma ** 4)) * G
    G1_90 = G1_0.T
    G2_90 = G2_0.T
    hxy = ((X * Y) / (sigma ** 8)) * G
    
    #3. calculates derivatives w.r.t. to all directions
        
    image_g1_0 = 0.1 * Image.convolve(image, G1_0, mode='nearest')
    image_g2_0 = 10  * Image.convolve(image, G2_0, mode='nearest')
    image_g1_90 = 0.1 * Image.convolve(image, G1_90, mode='nearest')
    image_g2_90 = 10 * Image.convolve(image, G2_90, mode='nearest')
    fxy = Image.convolve(image, hxy, mode='nearest')
Example #22
def int_func(**kwargs):
    if len(kwargs) == 1:
        f = kwargs['xin']
        x = numpy.arange(numpy.size(f)) * 1.
    else:
        f = kwargs['fin']
        x = kwargs['xin']

    n = numpy.size(f)

    g = numpy.zeros(n)

    if kwargs.get('simple') is not None:
        # Just use trapezium rule
        g[0] = 0.0
        for i in range(1, n):
            g[i] = g[i - 1] + 0.5 * (x[i] - x[i - 1]) * (f[i] + f[i - 1])
    else:
        n2 = n // 2

        g[0] = 0.0
        for i in range(n2, n):
            g[i] = integrate.simps(f[0:i], x[0:i])

        for i in range(1, n2):
            g[i] = g[n - 1] - integrate.simps(f[i:], x[i:])

    return g
Example #23
def generator(data,
              lookback,
              delay,
              min_index,
              max_index,
              shuffle=False,
              batch_size=32,
              step=1):
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while 1:
        if shuffle:
            rows = np.random.randint(min_index + lookback,
                                     max_index,
                                     size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
        targets = np.zeros((len(rows), ))
        for j, row in enumerate(rows):
            indices = range(rows[j] - lookback, rows[j], step)
            samples[j] = data[indices]
            targets[j] = data[rows[j] + delay][3]
        yield samples, targets
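
Pulling one batch from the generator to check shapes (synthetic data and made-up lookback/delay values):

import numpy as np

data = np.random.rand(1000, 8)  # 1000 timesteps, 8 features
gen = generator(data, lookback=240, delay=24, min_index=0, max_index=800,
                batch_size=32, step=6)
samples, targets = next(gen)
print(samples.shape, targets.shape)  # (32, 40, 8) and (32,)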
Example #24
def plot_confusion_matrix(cls_pred):
    # This is called from print_test_accuracy() below.

    # cls_pred is an array of the predicted class-number for
    # all images in the test-set.

    # Get the true classifications for the test-set.
    cls_true = data.test.cls

    # Get the confusion matrix using sklearn.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)

    # Print the confusion matrix as text.
    print(cm)

    # Plot the confusion matrix as an image.
    plt.matshow(cm)

    # Make various adjustments to the plot.
    plt.colorbar()
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, range(num_classes))
    plt.yticks(tick_marks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')

    plt.show()
Example #25
    def visualize(patterns):
        x = np.arange(3)

        plt.ylabel("number")
        plt.bar(x, [len(patterns.get("all").get("pattern_1")),
                    len(patterns.get("all").get("pattern_2")),
                    len(patterns.get("all").get("pattern_3"))])
        plt.xticks(x, ["[ADD] prefix", "pattern 2", "pattern 3"])
        plt.show()
def cross_entropy_error(y, t):  # for the case where the teacher data t is given as class labels (not one-hot)
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
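
A tiny worked example (values made up): y holds softmax outputs, t holds the correct class indices.

import numpy as np

y = np.array([[0.1, 0.7, 0.2],
              [0.8, 0.1, 0.1]])
t = np.array([1, 0])  # correct labels for the two samples
# average of -log(0.7) and -log(0.8), roughly 0.29
print(cross_entropy_error(y, t))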
Example #27
def sample_recommendation(model, data, user_ids):

    # number of users and movies in training data
    n_users, n_items = data['train'].shape

    # generate recommendations for each user inputted
    for user_id in user_ids:

        # Movies already liked
        known_positives = data['item_labels'][data['train'].tocsr()
                                              [user_id].indices]

        # movies the model predict they'll like
        scores = model.predict(user_id, np.arange(n_items))

        # rank the movies in order of most to least liked
        top_items = data['item_labels'][np.argsort(-scores)]

        # print results

        print('User %s' % user_id)
        print('      known positives:')

        for x in known_positives[:3]:
            print('      %s' % x)

        print('     Recommended:')

        for x in top_items[:3]:
            print('       %s' % x)
def sample_recommendation(model, data, user_ids):

    #number of users and movies in training data
    n_users, n_items = data['train'].shape

    #generate recommendations for each user we input
    for user_id in user_ids:

        #movies they already like
        known_positives = data['item_labels'][data['train'].tocsr()
                                              [user_id].indices]

        #movies our model predicts they will like
        scores = model.predict(user_id, np.arange(n_items))
        #rank them in order of most like to least
        top_items = data['item_labels'][np.argsort(-scores)]

        #print out the results
        print("Users %s" % user_id)
        print("     Known positives: ")

        for x in known_positives[:3]:
            print("    %s" % x)

        print("    Recommended:")

        for x in top_items[:3]:
            print("     %s" % x)
def gen_overlaid_histogram(data1,
                           data2,
                           n_bins=0,
                           data1_name='',
                           data2_name='',
                           x_label='',
                           y_label='',
                           title=''):
    # Set the bounds for the bins so that the two distributions are fairly compared
    max_nbins = 10
    data_range = [min(min(data1), min(data2)), max(max(data1), max(data2))]
    binwidth = (data_range[1] - data_range[0]) / max_nbins

    if n_bins == 0:
        bins = np.arange(data_range[0], data_range[1] + binwidth, binwidth)
    else:
        bins = n_bins

    # Create the plot
    _, ax = plt.subplots()
    ax.hist(data1, bins=bins, alpha=1, label=data1_name)
    ax.hist(data2, bins=bins, alpha=0.75, label=data2_name)
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.legend(loc='best')
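
Illustrative call with two synthetic samples (names and values are made up):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
gen_overlaid_histogram(rng.normal(0, 1, 500), rng.normal(1, 1.5, 500),
                       data1_name='control', data2_name='treatment',
                       x_label='value', y_label='count',
                       title='Overlaid histograms')
plt.show()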
Example #30
    def _predict(self, X):

        # y_preds.shape is (max_iterations, len(X))
        y_preds = np.array([reg.predict(X) for reg in self.estimators])

        # sort t estimators prediction by value for each sample.
        # for example, if we have 3 estimators with 4 samples to predict,
        # y_preds matrix is like [[11,22,33,44], [12,21,30,40], [13,20,31,42]]
        # here, we want to sort predictions by value for each sample.
        # for example, sort to [[11,20,30,40], [12,21,31,42], [13,22,33,44]]
        # here, instead, we do it in transposed matrix and return index not actual values
        # so, first transpose to [[11,12,13],[22,21,20],[33,30,31],[44,40,42]]
        # then return sorted index [[0,1,2],[2,1,0],[1,2,0],[1,2,0]]
        sorted_idx = np.argsort(y_preds.T, axis=1)

        # log(1/betas)
        log_inv_betas = np.log(1.0 / self.betas)

        # true / false matrix
        is_over_median = log_inv_betas[sorted_idx].cumsum(
            axis=1) >= 0.5 * np.sum(log_inv_betas)

        # the first position with true is the index of median
        median_idx = is_over_median.argmax(axis=1)

        # back to original index
        median_estimators = sorted_idx[np.arange(len(X)), median_idx]

        return y_preds.T[np.arange(len(X)), median_estimators]