Example #1
 def image_bbox(params, img):
     img_ymax, img_xmax = img.nelec.shape
     px, py = img.equa2pixel(params.u)
     xlim = (np.max([0, int(np.floor(px - pixel_radius))]),
             np.min([img_xmax, int(np.ceil(px + pixel_radius))]))
     ylim = (np.max([0, int(np.floor(py - pixel_radius))]),
             np.min([img_ymax, int(np.ceil(py + pixel_radius))]))
     return xlim, ylim
Example #2
 def image_bbox(params, img):
     img_ymax, img_xmax = img.nelec.shape
     px, py = img.equa2pixel(params.u)
     xlim = (np.max([0,        int(np.floor(px - pixel_radius))]),
             np.min([img_xmax, int(np.ceil(px + pixel_radius))]))
     ylim = (np.max([0,        int(np.floor(py - pixel_radius))]),
             np.min([img_ymax, int(np.ceil(py + pixel_radius))]))
     return xlim, ylim
Example #3
def MovingWinFeats(x, xLen, fs, winLen, winDisp, featFn):
    # number of windows of winLen seconds, displaced by winDisp seconds,
    # that fit in a signal of xLen samples at fs Hz
    num_wins = int(np.floor((xLen - winLen * fs) / (winDisp * fs)) + 1)
    y = np.zeros(num_wins)
    y[0] = featFn(x[0:int(np.round(winLen * fs))])

    for i in range(1, num_wins):
        start = int(np.ceil(winDisp * fs * i))
        stop = int(np.ceil(winDisp * fs * i + winLen * fs))
        y[i] = featFn(x[start:stop])
    return y
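A hypothetical usage sketch (the signal and sampling parameters are illustrative, not from the original project): mean absolute value over 1 s windows with 0.5 s displacement on a 100 Hz signal.

import numpy as np

fs = 100.0
x = np.random.randn(1000)
feats = MovingWinFeats(x, xLen=len(x), fs=fs, winLen=1.0, winDisp=0.5,
                       featFn=lambda w: np.mean(np.abs(w)))
print(feats.shape)  # (19,) -- one feature per window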
Example #4
 def get_bounding_box(params, img):
     if params.is_star():
         bound = img.R
     elif params.is_galaxy():
         bound = gal_funs.gen_galaxy_psf_image_bound(params, img)
     else:
         raise "source type unknown"
     px, py = img.equa2pixel(params.u)
     xlim = (np.max([0,                  np.floor(px - bound)]),
             np.min([img.nelec.shape[1], np.ceil(px + bound)]))
     ylim = (np.max([0,                  np.floor(py - bound)]),
             np.min([img.nelec.shape[0], np.ceil(py + bound)]))
     return xlim, ylim
Example #5
def MovingWinFeatsFreq(X, xLen, SR, winLen, winDisp, featFn):
    num_wins = int(np.floor((xLen - winLen * SR) / (winDisp * SR)) + 1)
    y = np.zeros((num_wins, 8))
    y[0, :] = featFn(X[0:int(np.round(winLen * SR))], SR, winLen, winDisp)

    for i in range(1, num_wins):
        start = int(np.ceil(winDisp * SR * i))
        stop = int(np.ceil(winDisp * SR * i + winLen * SR)) - 1
        y[i, :] = featFn(X[start:stop], SR, winLen, winDisp)
    return y
Example #6
    def rasterize_triangles(self, vertices):
        '''
        Args:
            vertices: [nver, 3]; self.tri_mesh_data: [ntri, 3] triangle vertex indices
            self.h, self.w: size of the rendering
        Returns:
            depth_buffer: dict keyed by (i, j); saves the depth. Here, the bigger the z, the closer the point is to the camera.
            triangle_buffer: [h, w] saves the tri id (-1 for no triangle).
            barycentric_weight: dict keyed by (i, j); saves the corresponding barycentric weights.
        '''
        # initialize buffers; depth starts at -inf (the farthest position)
        depth_buffer = {}
        triangle_buffer = np.zeros([self.h, self.w], dtype=np.int32) - 1  # tri id = -1: pixel has no triangle correspondence
        barycentric_weight = {}

        for i in range(self.h):
            for j in range(self.w):
                depth_buffer[(i,j)] = -math.inf
                barycentric_weight[(i,j)] = np.array([0, 0, 0])

        for i in range(self.tri_mesh_data.shape[0]):
            tri = self.tri_mesh_data[i, :]  # 3 vertex indices

            # the inner bounding box
            umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
            umax = min(int(np.floor(np.max(vertices[tri, 0]))), self.w-1)

            vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
            vmax = min(int(np.floor(np.max(vertices[tri, 1]))), self.h-1)

            if umax<umin or vmax<vmin:
                continue

            for u in range(umin, umax+1):
                for v in range(vmin, vmax+1):
                    if not self.isPointInTri([u,v], vertices[tri, :2]): 
                        continue
                    w0, w1, w2 = self.get_point_weight([u, v], vertices[tri, :2]) # barycentric weight
                    point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
                    if point_depth > depth_buffer[v, u]:
                        depth_buffer[(v, u)] = point_depth
                        triangle_buffer[v, u] = i
                        barycentric_weight[(v, u)] = np.array([w0, w1, w2])

        return depth_buffer, triangle_buffer, barycentric_weight
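The helpers isPointInTri and get_point_weight are not shown in this example. A minimal sketch of one common way to compute the barycentric weights (an assumed implementation, not the original one):

import numpy as np

def get_point_weight(point, tri_pts):
    # tri_pts: [3, 2] xy coordinates of the triangle vertices
    a, b, c = np.asarray(tri_pts, dtype=float)
    v0, v1 = b - a, c - a
    v2 = np.asarray(point, dtype=float) - a
    d00, d01, d11 = v0 @ v0, v0 @ v1, v1 @ v1
    d20, d21 = v2 @ v0, v2 @ v1
    denom = d00 * d11 - d01 * d01
    w1 = (d11 * d20 - d01 * d21) / denom
    w2 = (d00 * d21 - d01 * d20) / denom
    return 1.0 - w1 - w2, w1, w2

A point lies inside the triangle exactly when all three weights are non-negative, which is one way isPointInTri could be implemented.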
Example #7
 def get_bounding_box(params, img):
     if params.is_star():
         bound = img.R
     elif params.is_galaxy():
         bound = gal_funs.gen_galaxy_psf_image_bound(params, img)
     else:
         raise "source type unknown"
     px, py = img.equa2pixel(params.u)
     xlim = (np.max([0, np.floor(px - bound)]),
             np.min([img.nelec.shape[1],
                     np.ceil(px + bound)]))
     ylim = (np.max([0, np.floor(py - bound)]),
             np.min([img.nelec.shape[0],
                     np.ceil(py + bound)]))
     return xlim, ylim
Example #8
def aveFreqDomain(X, SR, numInterp=2):
    # numInterp was undefined in the original snippet; it is exposed here as a
    # parameter. The tile/reshape pair is a zero-order-hold interpolation
    # (MATLAB: X = zoInterp(X, 2)); order='F' interleaves the copies.
    x_new = np.tile(X, (numInterp, 1))
    X = np.reshape(x_new, (1, -1), order='F')

    # one-sided amplitude spectrum
    L = X.size
    Y = np.fft.fft(X)
    P2 = np.abs(Y / L).ravel()
    P1 = P2[0:L // 2 + 1]
    P1[1:-1] = 2 * P1[1:-1]
    f = SR * np.arange(L // 2 + 1) / L

    # average amplitude in five frequency bands (Hz)
    bands = [(5, 15), (20, 25), (75, 115), (125, 160), (160, 175)]
    y = np.zeros(5)
    for i, (lo, hi) in enumerate(bands):
        begin = np.nonzero(np.floor(f) == lo)[0][0]
        stop = np.nonzero(np.floor(f) == hi)[0][0]
        y[i] = np.mean(P1[begin:stop + 1])
    return y
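A quick sanity check (hypothetical usage): with numInterp=1 the frequency axis matches SR directly, and a pure 10 Hz tone should concentrate its power in y[0], the 5-15 Hz band.

SR = 500
t = np.arange(0, 4, 1.0 / SR)
y = aveFreqDomain(np.sin(2 * np.pi * 10 * t), SR, numInterp=1)
print(y)  # y[0] dominates the other bands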
Example #9
def generate_L_shaped_domain(nx, ny, Lx1, Lx2, Ly1, Ly2):
    """ 
     Generate L-shape domain, consists of two pieces X1,Y1 (bottom rectangle), X2,Y2 (top rectangle)
     X,Y are coordinate matrices. 
     Ly2 --- Lx1
     | X2,Y2  |
     |        |
     |------Ly1-----Lx2
     |       X1,Y1    |
     0,0----Lx1-----Lx2
     """
    X1,Y1=generate_square_domain(x_min=0, x_max=Lx2, y_min=0, y_max=Ly1,\
        nx=nx, ny=int(np.floor(ny/2)))
    X2,Y2=generate_square_domain(x_min=0, x_max=Lx1, y_min=Ly1, y_max=Ly2,\
        nx=int(np.floor(nx/2)), ny=ny)
    return [[X1, Y1], [X2, Y2]]
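generate_square_domain is not shown in this example; a minimal sketch consistent with the call sites above (an assumed helper, not the original):

import numpy as np

def generate_square_domain(x_min, x_max, y_min, y_max, nx, ny):
    # coordinate matrices for a regular nx-by-ny grid
    x = np.linspace(x_min, x_max, int(nx))
    y = np.linspace(y_min, y_max, int(ny))
    X, Y = np.meshgrid(x, y)
    return X, Y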
Example #10
def get_D_loss_function_wave_eq(D, t_max, L, N):
    """
    Returns loss function for D.
    """
    t_space = np.linspace(0, t_max, N)
    x_space = np.linspace(0, L, N)
    X, T = np.meshgrid(x_space, t_space)
    dDdx = grad(D, 1)
    dDdx2 = grad(dDdx, 1)
    dDdt = grad(D, 2)
    dDdt2 = grad(dDdt, 2)
    #Leave the first few times alone, because the ansatz for D has the t^2 term that defines the behaviour for small t.
    min_t_idx = int(np.floor(X.shape[1] / 10))

    def loss_function(params):
        total = 0.
        for i in range(X.shape[0]):
            for j in range(min_t_idx, X.shape[1]):
                total = total + np.square(
                    D(params, X[i, j], T[i, j]) - X[i, j] * (L - X[i, j]))
                # (alternative penalty terms on dDdt2/dDdx2 and on the boundary
                # derivatives dDdx at x=0 and x=L were commented out here)
        return total / (X.shape[0] * X.shape[1])

    return loss_function
Example #11
 def __init__(self):
     #set our drift dynamics
     self.f_drift = f_trivial
     self.g_ctrl = g_mono
     self.u = u_step
     self.h = h_single
     
     #set our graph
     n_elements = 10
     n_regions = int(np.floor(n_elements/2))
     self.G = nx.random_regular_graph(4, n_elements)
     self.L = nx.linalg.laplacian_matrix(self.G).todense()
     self.D = np.array(nx.linalg.incidence_matrix(self.G).todense())
     self.D = np.diag(np.ones(shape=(n_elements,)))  # NOTE: overwrites the incidence matrix above with the identity
     # for each of our elements, assign them to a brain region
     self.e_to_r = np.random.randint(0,n_regions,size=n_elements)
     
     #do our disease layer
     n_symp = 2
     #self.Xi = np.random.randint(0,1,size=(n_regions,n_symp))
     self.Xi = Xi_1
     
     self.P = self.L
     
     self.x_state = np.random.uniform(size=(1000,1))
     
     self.n_regions = n_regions
     self.n_symp = n_symp
     self.n_elements = n_elements
Example #12
    def convergence_check(self, W, Delta_history):
        """
        Parameters
        ----------
        W : `int`
            Window size to use for the convergence check
        Delta_history : `numpy.ndarray`
            Computed Delta values

        Returns
        -------
        bool
            Indicates whether convergence has been reached
        """
        m = b = np.floor(np.sqrt(W)).astype(int)
        Delta_reshaped = np.reshape(Delta_history[-m*b:],(m,b))
        mu_n = np.mean(Delta_reshaped)
        Delta_batch_means = np.mean(Delta_reshaped,axis=1)
        sigma_n = np.sqrt((m/(b-1))* np.sum((Delta_batch_means - mu_n)**2))
        sd_error = tdist.ppf(1-self._delta/2, df=b-1) * (sigma_n/np.sqrt(m*b))
        lower = mu_n - sd_error
        upper = mu_n + sd_error
        
        return lower < 0 < upper
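A standalone sketch of the batch-means test above on synthetic data (delta and the window size W are illustrative assumptions):

import numpy as np
from scipy.stats import t as tdist

Delta_history = 0.1 * np.random.randn(500)  # stand-in for computed Delta values
W, delta = 100, 0.05
m = b = int(np.floor(np.sqrt(W)))
batches = np.reshape(Delta_history[-m * b:], (m, b))
mu_n = np.mean(batches)
batch_means = np.mean(batches, axis=1)
sigma_n = np.sqrt((m / (b - 1)) * np.sum((batch_means - mu_n) ** 2))
sd_error = tdist.ppf(1 - delta / 2, df=b - 1) * sigma_n / np.sqrt(m * b)
print(mu_n - sd_error < 0 < mu_n + sd_error)  # True when the mean Delta is indistinguishable from 0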
Example #13
 def sample_dataset(self, npts):
     x = np.random.uniform(self.xmin, self.xmax, size=npts)
     heights = np.random.uniform(self.ymin, self.ymax, size=self.num_pieces)
     bins = np.floor((x - self.xmin) / (self.xmax - self.xmin) *
                     self.num_pieces).astype(int)
     y = np.random.normal(heights[bins], self.std)
     return x, y
Example #14
def run_nn(params, input_size, output_size):
    N = params[0]
    W = params[1:]

    integer_part = int(np.floor(N))
    alpha = N - integer_part

    # Network parameters
    layer_sizes = [input_size]
    layer_sizes.extend([output_size for i in range(0, integer_part - 1)])
    L2_reg = 1.0

    # Training parameters
    learning_rate = 0.01
    momentum = 0.1

    # Load and process wines data
    N_data, train_images, train_labels, test_images, test_labels = get_wine_data()
    batch_size = len(train_images)

    # Make neural net functions
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)

    f_out = open(filename, 'w')  # `filename` is assumed to be defined at module scope in the source project
    f_out.write("    Train err  |   Test err  |   Alpha\n")
    f_out.close()

    final_test_err = loss_fun(W, alpha, train_images, train_labels)

    print(N, final_test_err)
    return final_test_err
Example #15
    def get_N_legend_values(self):
        curr_N = 10**np.floor(np.log(min(self.all_N)) / np.log(10))
        N_legend_values = [curr_N]
        while curr_N < max(self.all_N):
            curr_N *= 4
            N_legend_values.append(curr_N)

        return N_legend_values
Example #16
def run_nn(params, learning_rate, momentum, input_size, output_size):
    N = params[0]

    integer_part = int(np.floor(N))
    alpha = N - integer_part

    # Network parameters
    layer_sizes = [input_size]
    layer_sizes.extend([output_size for i in range(0, integer_part - 1)])
    L2_reg = 1.0

    # Training parameters
    param_scale = 0.1
    num_epochs = 200

    # Load and process wines data
    N_data, train_images, train_labels, test_images, test_labels = get_wine_data()
    batch_size = len(train_images)

    # Make neural net functions
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)

    # Gradient with respect to weights
    loss_grad_W = grad(loss_fun, 0)

    # Initialize weights
    rs = npr.RandomState(11)
    W = rs.randn(N_weights) * param_scale

    print("    Train err  |   Test err  |   Alpha   |   Loss   ")
    f_out = open(filename, 'w')  # `filename` is assumed to be defined at module scope in the source project
    f_out.write("    Train err  |   Test err  |   Alpha   \n")
    f_out.close()

    def print_perf(epoch, W, alpha, learning_rate, momentum):
        f_out = open(filename, 'a')
        test_perf  = frac_err(W, alpha, test_images, test_labels)
        train_perf = frac_err(W, alpha, train_images, train_labels)
        loss = loss_fun(W, train_images, train_labels, alpha)
        print("{0:15}|{1:15}|{2:15}|{3:15}|{4:15}".format(epoch, train_perf, test_perf, alpha, loss))
        f_out.write("{0:15}|{1:15}|{2:15}|{3:15}|{4:15}\n".format(epoch, train_perf, test_perf, alpha, loss))
        f_out.close()

    # Train with sgd
    batch_idxs = make_batches(train_images.shape[0], batch_size)
    cur_dir_W = np.zeros(N_weights)
    cur_dir_alpha = 0

    for epoch in range(num_epochs):
        print_perf(epoch, W, alpha, learning_rate, momentum)
        for idxs in batch_idxs:
            grad_W = loss_grad_W(W, train_images[idxs], train_labels[idxs], alpha)
            cur_dir_W = momentum * cur_dir_W + (1.0 - momentum) * grad_W
            W = W - learning_rate * cur_dir_W

    final_test_err = loss_fun(W, train_images, train_labels, alpha)
    print(N, final_test_err)
    return final_test_err
Example #17
def gen_n_point_in_polygon(n_point, polygon, tol=0.1):
    """
    -----------
    Description
    -----------
    Generate n regular spaced points within a shapely Polygon geometry
    function from stackoverflow
    -----------
    Parameters
    -----------
    - n_point (int) : number of points required
    - polygon (shapely.geometry.polygon.Polygon) : Polygon geometry
    - tol (float) : spacing tolerance (Default is 0.1)
    -----------
    Returns
    -----------
    - points (list) : generated point geometries
    -----------
    Examples
    -----------
    >>> geom_pts = gen_n_point_in_polygon(200, polygon)
    >>> points_gs = gpd.GeoSeries(geom_pts)
    >>> points_gs.plot()
    """
    # Get the bounds of the polygon
    minx, miny, maxx, maxy = polygon.bounds
    # ---- Initialize spacing and point counter
    spacing = np.sqrt(polygon.area / n_point)  # length scale: sqrt of the area per point
    point_counter = 0
    # Start while loop to find a better spacing, shrinking it by the tolerance each iteration
    while point_counter <= n_point:
        # --- Generate grid point coordinates
        x = np.arange(np.floor(minx), int(np.ceil(maxx)), spacing)
        y = np.arange(np.floor(miny), int(np.ceil(maxy)), spacing)
        xx, yy = np.meshgrid(x, y)
        # ----
        pts = [Point(X, Y) for X, Y in zip(xx.ravel(), yy.ravel())]
        # ---- Keep only points in polygons
        points = [pt for pt in pts if pt.within(polygon)]
        # ---- Verify number of point generated
        point_counter = len(points)
        spacing -= tol
    # ---- Return
    return points
Example #18
 def optimize(self, n_iters, objective, init_param):
     """
     Parameters
     ----------
     n_iters : `int`
         Number of iterations of the optimization
     objective: `function`
         Function for constructing the objective and gradient function
     init_param : `numpy.ndarray`, shape(var_param_dim,)
         Initial values of the variational parameters
     Returns
     ----------
     Dictionary
         smoothed_opt_param : `numpy.ndarray`, shape(var_param_dim,)
              Iterate averaged estimated variational parameters 
         variational_param_history : `numpy.ndarray`, shape(n_iters, var_param_dim)
             Estimated variational parameters over all iterations
         value_history : `numpy.ndarray`, shape(n_iters,)
              Estimated loss (ELBO) over all iterations
     """
     t0 = 0
     history = None
     learning_rate = self._sgo._learning_rate
     variational_param = init_param.copy()
     variational_param_mean = init_param.copy()
     value_history = []
     Delta_history = []
     variational_param_history = []
     for t in tqdm.trange(n_iters):
         object_val, object_grad = objective(variational_param)
         value_history.append(object_val)
         descent_dir, history = self._sgo.descent_direction(object_grad, history)
         variational_param -= learning_rate * descent_dir
          variational_param_history.append(variational_param.copy())  # copy, since the parameter array is updated in place above
         Delta = np.dot(variational_param,descent_dir) - 0.5*learning_rate*np.sum(descent_dir**2)
         Delta_history.append(Delta)
         W = np.max([np.min([t-t0, self._W0]), np.ceil(self._theta*(t-t0)).astype(int)])
         if (W >= self._W0) and (t % self._t_check == 0):
             convg = self.convergence_check(W, Delta_history)
          if convg:
                 m = b = np.floor(np.sqrt(W)).astype(int)
                 learning_rate = self._rho * learning_rate
                 variational_param_mean_prev = variational_param_mean
                 variational_param_mean = np.mean(np.array(variational_param_history[-m*b:]),axis = 0)
                 t0 = t
                 SKL = MFGaussian(self._dim)._kl(variational_param_mean_prev, variational_param_mean) + MFGaussian(self._dim)._kl(variational_param_mean, variational_param_mean_prev)       
                 if (SKL/self._rho < self._eps):
                     print('Stopping rule reached at', t+1, 'th iteration')
                     break
     return dict(smoothed_opt_param = variational_param_mean,
                 variational_param_history = variational_param_history,
                 value_history = np.array(value_history)) 
Example #19
def save_rotation_lookup(array_size, n_theta, dest_folder=None):

    image_center = [np.floor(x / 2) for x in array_size]

    coord0 = np.arange(array_size[0])
    coord1 = np.arange(array_size[1])
    coord2 = np.arange(array_size[2])

    coord2_vec = np.tile(coord2, array_size[1])

    coord1_vec = np.tile(coord1, array_size[2])
    coord1_vec = np.reshape(coord1_vec, [array_size[1], array_size[2]])
    coord1_vec = np.reshape(np.transpose(coord1_vec), [-1])

    coord0_vec = np.tile(coord0, [array_size[1] * array_size[2]])
    coord0_vec = np.reshape(coord0_vec, [array_size[1] * array_size[2], array_size[0]])
    coord0_vec = np.reshape(np.transpose(coord0_vec), [-1])

    # move origin to image center
    coord1_vec = coord1_vec - image_center[1]
    coord2_vec = coord2_vec - image_center[2]

    # create matrix of coordinates
    coord_new = np.stack([coord1_vec, coord2_vec]).astype(np.float32)

    # create rotation matrix
    theta_ls = np.linspace(0, 2 * np.pi, n_theta)
    coord_old_ls = []
    for theta in theta_ls:
        m_rot = np.array([[np.cos(theta),  -np.sin(theta)],
                          [np.sin(theta), np.cos(theta)]])
        coord_old = np.matmul(m_rot, coord_new)
        coord1_old = np.round(coord_old[0, :] + image_center[1]).astype(int)  # np.int is deprecated; use builtin int
        coord2_old = np.round(coord_old[1, :] + image_center[2]).astype(int)
        # clip coordinates
        coord1_old = np.clip(coord1_old, 0, array_size[1]-1)
        coord2_old = np.clip(coord2_old, 0, array_size[2]-1)
        coord_old = np.stack([coord1_old, coord2_old], axis=1)
        coord_old_ls.append(coord_old)
    if dest_folder is None:
        dest_folder = 'arrsize_{}_{}_{}_ntheta_{}'.format(array_size[0], array_size[1], array_size[2], n_theta)
    if not os.path.exists(dest_folder):
        os.mkdir(dest_folder)
    for i, arr in enumerate(coord_old_ls):
        np.save(os.path.join(dest_folder, '{:04}'.format(i)), arr)

    coord1_vec = coord1_vec + image_center[1]
    coord1_vec = np.tile(coord1_vec, array_size[0])
    coord2_vec = coord2_vec + image_center[2]
    coord2_vec = np.tile(coord2_vec, array_size[0])
    for i, coord in enumerate([coord0_vec, coord1_vec, coord2_vec]):
        np.save(os.path.join(dest_folder, 'coord{}_vec'.format(i)), coord)

    return coord_old_ls
Example #20
def gen_galaxy_psf_image(th,
                         u_s,
                         img,
                         xlim=None,
                         ylim=None,
                         check_overlap=True,
                         unconstrained=True,
                         return_patch=True):
    """ generates the profile of a combination of exp/dev images.
        Calls the above function twice - once for each profile, and adds them
        together
    """
    # unpack shape params
    theta_s, sig_s, phi_s, rho_s = th[0:4]

    # generate unit flux model patch
    px, py = img.equa2pixel(u_s)
    galmix = MixtureOfGaussians.convex_combine(galaxy_profs,
                                               [theta_s, 1. - theta_s])
    Tinv = gen_galaxy_transformation(sig_s, rho_s, phi_s,
                                     img.cd_at_pixel(px, py))
    amix = galmix.apply_affine(Tinv, np.array([px, py]))
    cmix = amix.convolve(img.psf)

    # compute bounding box
    if xlim is None and ylim is None:
        bound = calc_bounding_radius(cmix.pis,
                                     cmix.means,
                                     cmix.covs,
                                     error=1e-5,
                                     center=np.array([px, py]))
        xlim = (np.max([0, np.floor(px - bound)]),
                np.min([img.nelec.shape[1],
                        np.ceil(px + bound)]))
        ylim = (np.max([0, np.floor(py - bound)]),
                np.min([img.nelec.shape[0],
                        np.ceil(py + bound)]))

    # compute values on grid
    return cmix.evaluate_grid(xlim, ylim), ylim, xlim
Example #21
def build_toy_ca_const(rate, Xstruct):
    #generate a toy calcium trace with constant poisson rate.
    #Outlined code here to make this more general (for Illana stuff)

    # (A commented-out MATLAB prototype for generating a fake place field --
    #  covariance via GenCovM/mkcov_ASDfactored, Kronecker products, sampling,
    #  and an exponential nonlinearity -- is omitted here.)

    logsprate = np.log(rate) * np.ones(
        int(np.floor(Xstruct['T'] / Xstruct['dtSp'])))  #constant (for now)
    spcounts = np.random.poisson(Xstruct['dtSp'] *
                                 np.exp(logsprate))  # poisson spike counts

    ##### Optional Model #####
    #AR2
    if Xstruct['AR2'] is True:
        # AR2: two decay time constants. The original kept MATLAB syntax
        # (exp/poly/filter) and 'par.' fields; this port assumes the matching
        # keys live in Xstruct and uses scipy.signal.lfilter.
        q = [np.exp(-Xstruct['dtSp'] / Xstruct['calc_ts']),
             np.exp(-Xstruct['dtSp'] / Xstruct['calc_ts_b'])]
        a_true = np.poly(q)
        z = lfilter([1], a_true, spcounts)

    else:
        #AR1
        z = lfilter([Xstruct['a']],
                    [1, -np.exp(-Xstruct['dtSp'] / Xstruct['calc_ts'])],
                    spcounts)  #convolve with exp filter

    trace = z + np.sqrt(Xstruct['Gauss_sigma']) * np.random.randn(
        np.size(z))  #add noise

    return trace
Example #22
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_ys, cell_xs = np.meshgrid(np.arange(rows), np.arange(cols))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_xs).astype(int)
    top_ix = np.floor(center_ys).astype(int)
    rw = center_xs - left_ix  # Relative weight of right-hand cells.
    bw = center_ys - top_ix  # Relative weight of bottom cells.
    left_ix = np.mod(left_ix, rows)  # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix = np.mod(top_ix, cols)
    bot_ix = np.mod(top_ix + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix,  top_ix] + bw*f[left_ix,  bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols))
Example #23
    def contact_rate(self, t):

        if self.number_group == 1:
            return 1.
        else:
            # The contact rate depends on the day type stored in the calendar:
            # 2 appears to include school contacts, 1 work contacts, and
            # anything else home/other contacts only. (Several alternative
            # constructions of the contact matrix were commented out here.)
            if self.calendar[int(np.floor(t))] == 2:
                contact = self.c_home + self.c_school + self.c_work + self.c_other
            elif self.calendar[int(np.floor(t))] == 1:
                contact = self.c_home + self.c_work + self.c_other
            else:
                contact = self.c_home + self.c_other

            # duplicate the age-structured matrix across the two risk groups
            contact = np.tile(contact, [2, 2])

            return contact
Example #24
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_ys, cell_xs = np.meshgrid(np.arange(rows), np.arange(cols))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_xs).astype(int)
    top_ix  = np.floor(center_ys).astype(int)
    rw = center_xs - left_ix              # Relative weight of right-hand cells.
    bw = center_ys - top_ix               # Relative weight of bottom cells.
    left_ix  = np.mod(left_ix,     rows)  # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix   = np.mod(top_ix,      cols)
    bot_ix   = np.mod(top_ix  + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix,  top_ix] + bw*f[left_ix,  bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols))
Example #25
def advect(f, vx, vy):
    """Instead of moving the cell centers forward in time using the velocity fields,
  we look for the particles which end up exactly at the cell centers by tracing
  backwards in time from the cell centers. See 'implicit Euler integration.'"""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    center_xs = (cell_xs - vx).ravel()  # look backwards one timestep
    center_ys = (cell_ys - vy).ravel()

    left_ix = np.floor(center_ys).astype(int)  # get locations of source cells.
    top_ix = np.floor(center_xs).astype(int)
    rw = center_ys - left_ix  # relative weight of cells one row down
    bw = center_xs - top_ix  # same for cells one column right
    left_ix = np.mod(left_ix, rows)  # wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix = np.mod(top_ix, cols)
    bot_ix = np.mod(top_ix + 1, cols)

    # a linearly-weighted sum of the 4 cells closest to the source of the cell center.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix,  top_ix] + bw*f[left_ix,  bot_ix]) \
                  + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols))
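A minimal usage sketch: advect a small random field one step with a uniform half-cell velocity; with the periodic wrap, total mass is conserved up to floating point.

import numpy as np

rows, cols = 4, 5
f0 = np.random.rand(rows, cols)
f1 = advect(f0, vx=0.5 * np.ones((rows, cols)), vy=np.zeros((rows, cols)))
print(f0.sum(), f1.sum())  # approximately equal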
Example #26
def fitFlare(x, y, yerr, tstart, tstop, skew_fac=10):
    mask = (x > tstart) & (x < tstop)
    mu0 = (tstart + tstop) / 2
    sig0 = (tstop - tstart) / 2
    A0 = np.max(y) * 100
    skew = 0

    try:
        # Fit a gaussian to the segment
        popt1, pcov1 = curve_fit(fh.gaussian,
                                 x[mask],
                                 y[mask],
                                 p0=(mu0, sig0, A0),
                                 sigma=yerr[mask])
        y_model = fh.gaussian(x[mask], popt1[0], popt1[1], popt1[2])
        chi1 = fh.redChiSq(y_model, y[mask], yerr[mask], len(y[mask]) - 3)

        # Fit the Davenport 2014 flare model to the segment
        popt2, pcov2 = curve_fit(fh.aflare1,
                                 x[mask],
                                 y[mask],
                                 p0=(mu0, sig0, A0),
                                 sigma=yerr[mask])
        y_model = fh.aflare1(x[mask], popt2[0], popt2[1], popt2[2])
        chi2 = fh.redChiSq(y_model, y[mask], yerr[mask], len(y[mask]) - 3)

        # If the flare model fit worked, calculate the skew by centering on the peak of the aflare model
        # Use a window scaled to the FWHM of the flare model for integration
        mu = popt2[0]  #np.trapz(x[mask]*A*y[mask], x[mask])
        f_hwhm = popt2[1] / 2
        t1_skew, t2_skew = mu - skew_fac * f_hwhm, mu + skew_fac * f_hwhm
        skew_mask = (x > t1_skew) & (x < t2_skew)

        # Measure the skew by treating time = x and flux = p(x). Calculate the
        # third moment of p(x)
        A = 1 / np.trapz(y[skew_mask], x[skew_mask])
        var = np.trapz((x[skew_mask] - mu)**2 * A * y[skew_mask], x[skew_mask])
        stddev = np.sqrt(np.fabs(var))
        skew = np.trapz((x[skew_mask] - mu)**3 * A * y[skew_mask],
                        x[skew_mask]) / stddev**3
    except Exception:
        traceback.print_exc()
        empty = np.zeros(3)
        return empty, empty, -1, empty, empty, -1, 0, 0

    n_pts = len(x[mask])
    n_pts_true = np.floor(((tstop - tstart) * u.d).to(u.min).value / 2)
    coverage = n_pts / n_pts_true

    return popt1, np.sqrt(pcov1.diagonal()), chi1, popt2, np.sqrt(
        pcov2.diagonal()), chi2, skew, coverage
Example #27
    def __init__(self, params, dim, n_models, n_same):
        self.d = dim
        mu0, sig0 = params['mu0'], params['sig0']
        muR, sigR = params['muR'], params['sigR']

        mean = np.zeros((n_models, dim))
        for i in range(n_same):
            sign = 1 if i % 2 == 0 else -1
            mean[i, int(np.floor(i / 2))] = sign * mu0
        for i in range(n_same, n_models):
            sign = 1 if i % 2 == 0 else -1
            mean[i, int(np.floor(
                (i - n_same) / 2)) % dim] = (1. + 0.2 * np.floor(
                    (i - n_same) / 2)) * sign
        self.models = []
        for i in range(n_models):
            self.models = self.models + [
                stats.multivariate_normal(mean[i, :], sig0 * np.eye(dim))
            ]

        meanR = np.zeros(dim)
        meanR[0] = muR
        self.q = stats.multivariate_normal(meanR, sigR * np.eye(dim))
Example #28
    def block_indeces(wtst_data, B=None):
        """ Outputs a list of indices of B mutually exclusive (equal sized) subgroups stratified on the propensity score.
            B = number of blocks
        """
        X1, X2, Y1, Y2 = wtst_data.x1x2y1y2()
        if B is None:
            blocksize = np.sqrt(len(X1[:, 0]))
            # number of blocks
            B = int(np.floor(len(X1[:, 0]) / blocksize))

        propensity_scores = general_utils.propensity_score(X1, X2)
        split_list_x1, split_list_x2 = general_utils.stratify_propensity(
            propensity_scores, X1, B)

        return split_list_x1, split_list_x2
Example #29
def cubic_spline(dx, a=1, b=0):
    """Generate a cubix spline centered on `dx`.

    Parameters
    ----------
    dx: float
        Fractional amount that the kernel will be shifted
    a: float
        Cubic spline sharpness parameter
    b: float
        Cubic spline shape parameter

    Returns
    -------
    result: array
        Cubic Spline kernel in a window from floor(dx)-1 to floor(dx) + 3
    window: array
        The pixel values for the window containing the kernel
    """
    if np.abs(dx) > 1:
        raise ValueError("The fractional shift dx must be between -1 and 1")

    def inner(x, a, b):
        """Cubic from 0<=abs(x)<=1
        """
        third = (-6 * a - 9 * b + 12) * x ** 3
        second = (6 * a + 12 * b - 18) * x ** 2
        zero = -2 * b + 6
        return (zero + second + third) / 6

    def outer(x, a, b):
        """Cubic from 1<=abs(x)<=2
        """
        third = (-6 * a - b) * x ** 3
        second = (30 * a + 6 * b) * x ** 2
        first = (-48 * a - 12 * b) * x
        zero = 24 * a + 8 * b
        return (zero + first + second + third) / 6

    window = np.arange(-1, 3) + np.floor(dx)
    result = np.zeros(window.shape)
    _x = np.abs(dx - window)
    outer_cut = (_x > 1) & (_x < 2)
    inner_cut = _x <= 1
    result[outer_cut] = outer(_x[outer_cut], a, b)
    result[inner_cut] = inner(_x[inner_cut], a, b)
    return result, np.array(window).astype(int)
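A quick check (hypothetical usage): for this Mitchell-Netravali-style kernel the four taps sum to 1, so total flux is preserved when shifting by a fractional pixel.

kernel, window = cubic_spline(0.4, a=1, b=0)
print(window)                         # [-1  0  1  2]
print(np.isclose(kernel.sum(), 1.0))  # True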
Example #30
def run_nn(N, input_size, output_size):
    integer_part = int(np.floor(N[0]))
    alpha = N[0] - integer_part

    # Network parameters
    layer_sizes = [input_size]
    layer_sizes.extend([output_size for i in range(0, integer_part - 1)])
    L2_reg = 1.0

    # Training parameters
    param_scale = 0.1

    # Load and process wines data
    N_data, train_images, train_labels, test_images, test_labels = get_wine_data()
    batch_size = len(train_images)

    # Make neural net functions
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)

    # Gradient with respect to weights and alpha
    loss_grad_P = grad(loss_fun, 0)

    # Initialize weights
    rs = npr.RandomState(11)
    W = rs.randn(N_weights) * param_scale

    print("    Train err  |   Test err  |   Alpha")
    f_out = open(filename, 'w')  # `filename` is assumed to be defined at module scope in the source project
    f_out.write("    Train err  |   Test err  |   Alpha\n")
    f_out.close()

    def print_perf(params):
        f_out = open(filename, 'a')
        test_perf  = frac_err(params, test_images, test_labels)
        train_perf = frac_err(params, train_images, train_labels)
        print("{0:15}|{1:15}|{2:15}".format(train_perf, test_perf, params[-1]))
        f_out.write("{0:15}|{1:15}|{2:15}\n".format(train_perf, test_perf, params[-1]))
        f_out.close()

    # Minimize with BFGS
    res = optimize.minimize(loss_fun, np.append(W, alpha), jac=loss_grad_P, method='L-BFGS-B', \
        args=(train_images, train_labels), options={'disp': True, 'maxiter': 2})
    print(res)

    final_test_err = frac_err(res.x, train_images, train_labels)
    print(N[0], final_test_err)
    return final_test_err
Example #31
def dops(X, Y, T, C, m, alpha, init, eta=0.01, iters=1000, print_every=10):
    # config
    M, d = X.shape
    N = int(np.floor(M / m))
    X, Y = shuffle_data(X, Y)
    Ss = [X[i * m:(i + 1) * m] for i in range(N)]
    zs = [Y[i * m:(i + 1) * m] for i in range(N)]
    maxzs = [e.max() for e in zs]
    alphazs = [[i for i, e in enumerate(z) if e >= maxz * alpha]
               for z, maxz in zip(zs, maxzs)]

    # optimize
    theta = gradient_descent(init, Ss, zs, C, alphazs, eta, iters, print_every)

    # get result
    res = [f_theta(theta, t, C) for t in T]
    return res, theta, np.argmax(res)
Example #32
 def LL(leafMeans,bagSigma):
     NBags = len(bagSigma)
     NInternal_Nodes = int(np.floor(NBags/2))
     NLeaves = len(leafMeans)
     ll = 0
     Nrows = int(np.ceil(np.log2(NLeaves))) + 1
     for row in range(Nrows):
         for col in range(2**row):
             idx = col
             if row > 0:
                 idx += 2**(row) - 1
             leafIndices = (getChildren(idx, NInternal_Nodes) - NInternal_Nodes).astype(int)
             ln = leafN[leafIndices]
             mu = np.dot(leafMeans[leafIndices],ln)/np.sum(ln)
             sigma = bagSigma[idx]
             ll = ll + (rlambda**row) * logLikelihood(x[idx],mu,sigma,normalize)
     return -1 * ll
Example #33
def gen_toy_ca_GP(N, D, CA_struct, Pois_noise=True, scale_pois=1):
    '''
    Generate data with GP statistics with variance 'rh' and length scale
    'len_sc'. Data will be generated of length N, with batch size D.
    Pois_noise = False indicates Gaussian noise is added to the GP.
    '''
    # rh and len_sc were undefined free variables in the original snippet;
    # they are assumed here to live in CA_struct
    rh, len_sc = CA_struct['rh'], CA_struct['len_sc']

    # squared-exponential covariance over the time grid, then one GP draw
    M1 = np.array([range(N)]) - np.transpose(np.array([range(N)]))
    K = rh * np.exp(-(np.square(M1) / (2 * np.square(len_sc))))
    x = np.array(np.random.multivariate_normal(np.zeros(N), K))

    # softplus nonlinearity, replicated across batches
    x = [np.log(1 + np.exp(x)) for batch in range(D)] / np.asarray(scale_pois)
    y = np.random.poisson(x)  # poisson spikes from GP
    x = x[0]

    # constant (for now) log spike rate; 'rate' and 'Xstruct' in the original
    # appear to be copy-paste residue, so CA_struct is used here instead
    logsprate = np.log(CA_struct['rate']) * np.ones(
        int(np.floor(CA_struct['T'] / CA_struct['dtSp'])))
    spcounts = np.random.poisson(CA_struct['dtSp'] * np.exp(logsprate))  # poisson spike counts

    ##### Optional Model #####
    if CA_struct['AR2'] is True:
        # AR2: two decay time constants (MATLAB exp/poly/filter, ported)
        q = [np.exp(-CA_struct['dtSp'] / CA_struct['calc_ts']),
             np.exp(-CA_struct['dtSp'] / CA_struct['calc_ts_b'])]
        a_true = np.poly(q)
        z = lfilter([1], a_true, spcounts)
    else:
        # AR1: convolve with exponential filter
        z = lfilter([CA_struct['a']],
                    [1, -np.exp(-CA_struct['dtSp'] / CA_struct['calc_ts'])],
                    spcounts)

    trace = z + np.sqrt(CA_struct['Gauss_sigma']) * np.random.randn(np.size(z))  # add noise
    return trace
Example #34
    def sliding_window_tensor(self, tensor, window_size, stride, operation):
        # grab image size, set container for results
        image_size = tensor.shape[1]
        num_images = tensor.shape[0]
        num_kernels = self.kernels.shape[0]
        results = []

        #### gather indices for all tensor blocks ####
        batch_x = []
        batch_y = []
        # slide window over input image with given window size / stride and function
        for i in np.arange(0, image_size - window_size + 1, stride):
            for j in np.arange(0, image_size - window_size + 1, stride):
                # take a window of input tensor
                batch_x.append(i)
                batch_y.append(j)
        batch_inds = np.asarray([batch_x, batch_y])

        # grab indices for a single image
        b, m, n = tensor.shape
        K = int(np.floor(window_size / 2.0))
        R = np.arange(0, K + 2)
        extractor_inds = R[:, None] * n + R + (batch_inds[0] * n +
                                               batch_inds[1])[:, None, None]

        # extend to the entire tensor
        base = [copy.deepcopy(extractor_inds)]
        ind_size = image_size**2
        for i in range(tensor.shape[0] - 1):
            base.append(extractor_inds + ((i + 1) * ind_size))
        base = np.array(base)

        # extract windows using numpy (to avoid for loops involving kernel weights)
        tensor_windows = tensor.flatten()[base]

        # process tensor windows
        results = []
        if operation == 'convolution':
            results = self.conv_function(tensor_windows)
        if operation == 'pool':
            results = self.pool_function(tensor_windows)
        return results
Example #35
def lanczos(dx, a=3):
    """Lanczos kernel

    Parameters
    ----------
    dx: float
        amount to shift image
    a: int
        Lanczos window size parameter

    Returns
    -------
    result: array-like
        1D Lanczos kernel
    """
    if np.abs(dx) > 1:
        raise ValueError("The fractional shift dx must be between -1 and 1")
    window = np.arange(-a + 1, a + 1) + np.floor(dx)
    y = np.sinc(dx - window) * np.sinc((dx - window) / a)
    return y, window.astype(int)
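Hypothetical usage: build the kernel for a quarter-pixel shift. Weighting an image's pixels at the returned window offsets by the kernel resamples the image at x + dx.

kernel, window = lanczos(0.25)
print(window)  # [-2 -1  0  1  2  3]
print(kernel)  # largest weight at the 0 tap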
Example #36
    def __init__(self,data_obj,p=1,oversampled=0,t_offset=None,precomputed=None,pct_spike=95):
        # some fns. require 'precomputed', a dict with at least two keys: theta_star (output of lbfgsb) and fn_obj, used in the optimization of theta_star
        # t_offset: if oversampled, this is shape (N,), and gives the offset between stim trigger and frame (nbefore).
        self.data_obj = data_obj
        self.F = data_obj.F
        self.nroi = self.F.shape[0]
        self.p = p
        self.b = np.zeros((self.nroi,1,1))
        self.g = np.zeros((self.nroi,self.p,1))
        self.a = np.zeros((self.nroi,1,1))
        self.sn = np.zeros((self.nroi,1,1))
        fudge_factor = .97
        for i in range(self.nroi):
            _,s,self.b[i,0,0],gtemp,_  = deconvolve(data_obj.dfof[i].astype(np.float64),penalty=1,g=tuple([None]*self.p))
            self.g[i,:,0] = np.array(gtemp)
            self.a[i] = np.percentile(s,pct_spike)
            est = estimate_parameters(data_obj.dfof[i].astype(np.float64), p=self.p, fudge_factor=fudge_factor)
            self.sn[i] = est[1]
#        if not type(g) is tuple:
#            g = (g,)
#        self.g = np.array(g)
        #self.fn_obj = fn_obj
        #nangle = len(np.unique(data_obj.angle))
        self.noise = (self.sn**2*(1+(self.g**2).sum(1)[:,np.newaxis]))
        self.smax = 5
        #self.fn_obj.compute_helper_vars(data_obj,self)
        ##self.pFs = [self.p_F_given_s(s) for s in range(self.smax)]
        self.log_pFs = [self.log_p_F_given_s(s) for s in range(self.smax)]
        self.oversampled = oversampled
        if self.oversampled:
            self.sampwt = np.ones((self.oversampled,1))/self.oversampled
            self.sampmat = np.zeros((self.oversampled*(self.F.shape[0]-1),self.F.shape[1]),dtype='bool')
            dig = np.floor(self.oversampled*t_offset).astype('<i2')
            for i in range(self.sampmat.shape[1]):
                self.sampmat[dig::self.oversampled,i] = 1
        if precomputed:
            theta_star = precomputed['theta_star']
            fn_obj = precomputed['fn_obj']
            self.rpre = np.zeros(np.array(self.F.shape)+np.array((0,-1,0))) # one fewer time point required
            for i in range(self.nroi):
                self.rpre[i] = fn_obj.rfunc(theta_star[i][0])
Example #37
# (the definition line of nllfun was lost in extraction; x is assumed to
#  carry mu in its leading entries and logsigma last)
def nllfun(x):
    mu, logsigma = x[:-1], x[-1]
    return logsigma + 0.5*np.sum((mu/(np.exp(logsigma) + 0.01))**2) + 0.1/np.exp(logsigma)
nllfunt = lambda x, t : nllfun(x)
gradfun = grad(nllfunt)

# ------ Variational parameters -------
D = 2
seed = 0
init_scale = 2.5 / 3
init_mu = np.array([-1.0, 1.0])
N_iter = 500
alpha = 0.01

# ------ Plot parameters -------
N_samples = 250
N_samples_trails = 5
sample_trails_ix = [int(i) for i in np.floor(np.linspace(0,N_samples, N_samples_trails))][:-1]
N_snapshots = 10
spacing = N_iter // N_snapshots  # integer division so range() below accepts it
snapshot_times = range(0, N_iter + 1, spacing)
trail_lengths = N_iter
kernel_width = 0.1

num_rings = 3

def make_circle(N=100):
    th = np.linspace(0, 2 * np.pi, N)
    return np.concatenate((np.cos(th)[None, :], np.sin(th)[None, :]), axis=0).T

def run():
    all_xs = np.concatenate([make_circle(N_samples) * init_scale*(i+1) + init_mu for i in range(num_rings)])
Example #38
from __future__ import absolute_import

import autograd.numpy as np
import scipy.stats
from autograd.extend import primitive, defvjp
from autograd.numpy.numpy_vjps import unbroadcast_f

cdf = primitive(scipy.stats.poisson.cdf)
logpmf = primitive(scipy.stats.poisson.logpmf)
pmf = primitive(scipy.stats.poisson.pmf)

def grad_poisson_logpmf(k, mu):
    return np.where(k % 1 == 0, k / mu - 1, 0)

defvjp(cdf, lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * -pmf(np.floor(k), mu)), argnums=[1])
defvjp(logpmf, lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * grad_poisson_logpmf(k, mu)), argnums=[1])
defvjp(pmf, lambda ans, k, mu: unbroadcast_f(mu, lambda g: g * ans * grad_poisson_logpmf(k, mu)), argnums=[1])
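A small sanity check of the custom gradients above (an assumed usage example, not part of the original module):

from autograd import grad

dlogpmf_dmu = grad(lambda mu: logpmf(3, mu))
print(dlogpmf_dmu(2.0))  # k/mu - 1 = 3/2 - 1 = 0.5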
Example #39
                        elif fig_idx == 1:
                            ax1.set_xlabel('video age (day)', fontsize=24)

                        ax1.set_xlim([0, 125])
                        ax1.set_ylim(ymin=max(0, ax1.get_ylim()[0]))
                        ax2.set_ylim([0, 1])
                        ax1.tick_params('y', colors='b')
                        ax2.tick_params('y', colors='k')

                        annotated_str = r'ID: {0}'.format(vid)
                        annotated_str += '\n'
                        annotated_str += r'$C$: {0:.4f}, $\lambda$: {1:.4f}'.format(*optimizer1.x)
                        ax2.text(120, 0.77, annotated_str, horizontalalignment='right', fontsize=24)

                        ax2.set_xticks([0, 40, 80, 120])
                        display_min = int(np.floor(min(daily_view) / 100) * 100)
                        display_max = int(np.ceil(max(daily_view) / 100) * 100)
                        ax1.set_yticks([display_min, (display_min+display_max)/2, display_max])
                        ax2.set_yticks([0.0, 0.5, 1.0])
                        for ax in [ax1, ax2]:
                            plt.setp(ax.yaxis.get_majorticklabels(), rotation=90)
                            ax.tick_params(axis='both', which='major', labelsize=24)

    plt.legend([plt.Line2D((0, 1), (0, 0), color='k', linestyle='--'),
                plt.Line2D((0, 1), (0, 0), color='b'), plt.Line2D((0, 1), (0, 0), color='r')],
               ['Observed relative engagement', 'Observed view series', 'Fitted relative engagement'],
               fontsize=18, frameon=False, handlelength=1,
               loc='lower center', bbox_to_anchor=(0.5, -1.75), ncol=2)

    plt.title('(a)', fontsize=24)
    plt.tight_layout(rect=[0, 0.08, 1, 1], h_pad=0)