Example #1
    def convolve_with_basis(self, signal):
        """
        Convolve each column of the event count matrix with this basis
        :param S:     signal: an array-like data, each series is (1, T) shape
        :return: TxB of inputs convolved with bases
        """
        (T,_) = signal.shape
        (R,B) = self.basis.shape


        # Initialize array for filtered stimulus
        F = np.empty((T,B))

        # Compute convolutions of each basis vector, one at a time.
        # The 'full' output has length T + R - 1; keep the first T samples.
        for b in np.arange(B):
            F[:, b] = sig.fftconvolve(signal,
                                      np.reshape(self.basis[:, b], (R, 1)),
                                      'full')[:T, 0]

        # FFT round-off can leave tiny negative values; clip them away
        # when both the basis and the signal are nonnegative.
        if np.amin(self.basis) >= 0 and np.amin(signal) >= 0:
            np.clip(F, 0, np.inf, out=F)
            assert np.amin(F) >= 0, "convolution should be >= 0"

        return F
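A standalone sketch of the same truncation in the 1-D case (the data and filter here are made up): the 'full' convolution has length T + R - 1, and keeping the first T samples gives the causally filtered signal.

import numpy as np
import scipy.signal as sig

T, R = 100, 10
signal = np.random.rand(T)                   # hypothetical event counts
basis = np.exp(-np.arange(R) / 3.0)          # one exponential basis filter
filtered = sig.fftconvolve(signal, basis, 'full')[:T]
assert filtered.shape == (T,)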
Example #2
def colorize(frames, cmap):
    import matplotlib.pyplot as plt
    cmap = plt.get_cmap(cmap) if isinstance(cmap, str) else cmap
    frames = frames.astype(float)   # copy, and allow true division below
    frames -= np.nanmin(frames)     # shift so the minimum is 0
    frames /= np.nanmax(frames)     # scale into [0, 1] for the colormap
    return np.clip(cmap(frames)[...,:3]*255, 0, 255)
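A usage sketch, assuming the colorize above is in scope (the frames and colormap are arbitrary): a stack of grayscale frames is normalized and mapped through a matplotlib colormap into RGB values in [0, 255].

import numpy as np

frames = np.random.rand(5, 32, 32)    # hypothetical (n_frames, H, W) stack
rgb = colorize(frames, 'viridis')     # shape (5, 32, 32, 3)
assert rgb.shape == (5, 32, 32, 3)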
Example #3
 def next_t(path, t, dist):
     p = path.point(t)
     L = path.length()
     # t += 1.0 / np.abs(path.derivative(t))
     itr = 0
     while itr < 20:
         itr += 1
         p1 = path.point(t)
         err = np.abs(p1 - p) - dist
         d1 = path.derivative(t)
         if np.abs(err) < 1e-5:
             return t, p1, d1 / np.abs(d1)
         derr = np.abs(d1) * L
         # do a step in Newton's method (clipped because some of the
         # gradients in the curve are really small)
         t -= np.clip(err / derr, -1e-2, 1e-2)
         t = np.clip(t, 0, 1)
     return t, p, d1 / np.abs(d1)
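The loop above is a clipped Newton iteration: the step err / derr is bounded so the tiny derivatives mentioned in the comment cannot throw t far off. The same idea on a toy scalar problem (all names here are hypothetical):

import numpy as np

def clipped_newton(f, df, x0, step_limit=1e-2, tol=1e-5, max_iter=100):
    # Newton's method with the step clipped so a near-zero derivative
    # cannot produce a huge, destabilizing jump.
    x = x0
    for _ in range(max_iter):
        err = f(x)
        if np.abs(err) < tol:
            break
        x -= np.clip(err / df(x), -step_limit, step_limit)
    return x

root = clipped_newton(lambda x: x**2 - 2.0, lambda x: 2.0 * x, 1.0)  # ~1.41421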
Example #4
def simulator(theta, v):
    # Gaussian approximation to Binomial(n, theta) via reparameterization:
    # mu + sigma * v with v ~ N(0, 1); n comes from the enclosing scope.
    p = theta
    mu = n * p
    sig = np.sqrt(n * p * (1 - p))
    gaussian = mu + sig * v
    gaussian = np.clip(gaussian, 0, n)   # keep the sample in the valid range [0, n]
    return gaussian, sig
Example #5
def simulator(theta, N):
    # Same Gaussian approximation, drawing the N noise samples internally.
    p = theta
    mu = n * p
    sig = np.sqrt(n * p * (1 - p))
    gaussian = mu + sig * np.random.randn(N)
    gaussian = np.clip(gaussian, 0, n)
    return gaussian, sig
Example #6
def simulator(n, theta, v):
    # As above, but with n passed in explicitly.
    p = theta
    mu = n * p
    sig = np.sqrt(n * p * (1 - p))
    gaussian = mu + sig * v
    gaussian = np.clip(gaussian, 0, n)   # replaces the old manual negativity check
    return gaussian
Example #7
def simulator(sim_variables, theta, u2):
    '''
    @summary: simulator
    @param sim_variables: simulator variables/settings that are fixed
    @param theta: global parameter, drawn from the variational distribution
    @param u2: standard normal noise controlling simulator randomness
    '''
    n = sim_variables[1]
    p = theta
    mu = n * p
    sig = np.sqrt(n * p * (1 - p))
    gaussian = mu + sig * u2
    gaussian = np.clip(gaussian, 0, n)
    return gaussian
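Examples #4 through #7 are variants of one reparameterized simulator: a Binomial(n, theta) draw is approximated by its Gaussian limit mu + sigma * u with u ~ N(0, 1), then clipped back to the valid range [0, n]. A quick sanity check of that approximation (n and theta made up):

import numpy as np

n, theta = 1000, 0.3
u = np.random.randn(100000)
approx = np.clip(n * theta + np.sqrt(n * theta * (1 - theta)) * u, 0, n)
exact = np.random.binomial(n, theta, size=100000)
print(approx.mean(), exact.mean())   # both close to n * theta = 300
print(approx.std(), exact.std())     # both close to sqrt(n*theta*(1-theta)) ~ 14.5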
Example #8
  def update_weights(self, params, dparams):
    """
    params: the parameters of the model as they currently exist.
    dparams: the grad of the cost w.r.t. the parameters of the model.

    We update params in place based on dparams, using a centered
    RMSProp-style rule with momentum.
    """
    for k in params:
      p = params[k]
      d = dparams[k]

      # Clip the raw gradient to tame exploding gradients.
      d = np.clip(d, -10, 10)
      # Running estimates of E[g^2] and E[g]; n - g*g below is then a
      # running estimate of the gradient variance.
      self.ns[k] = self.b * self.ns[k] + (1 - self.b) * (d*d)
      self.gs[k] = self.b * self.gs[k] + (1 - self.b) * d
      n = self.ns[k]
      g = self.gs[k]
      # Momentum step scaled by the inverse running standard deviation.
      self.ms[k] = self.d * self.ms[k] - self.lr * (d / (np.sqrt(n - g*g + 1e-8)))
      # Track the update-to-parameter norm ratio as a training diagnostic.
      self.qs[k] = self.b * self.qs[k] + (1 - self.b) * (l2(self.ms[k]) / l2(p))
      p += self.ms[k]
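A self-contained sketch of the same clip-then-scale update with a single parameter vector (the hyperparameters and names are illustrative, not from the original class):

import numpy as np

def rmsprop_step(p, d, state, lr=1e-3, beta=0.9, clip=10.0, eps=1e-8):
    d = np.clip(d, -clip, clip)                            # tame outlier gradients
    state['n'] = beta * state['n'] + (1 - beta) * d * d    # running E[g^2]
    state['g'] = beta * state['g'] + (1 - beta) * d        # running E[g]
    # step scaled by the inverse running standard deviation
    return p - lr * d / np.sqrt(state['n'] - state['g'] ** 2 + eps), state

p = np.zeros(3)
state = {'n': np.zeros(3), 'g': np.zeros(3)}
p, state = rmsprop_step(p, np.array([100.0, -0.5, 2.0]), state)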
Example #9
def gumbel_softmax(X, tau=1.0, eps=np.finfo(float).eps):
    # Despite its name, this computes an element-wise logistic
    # 1 / (1 + exp(X / tau)), i.e. sigma(-X / tau); the commented line is
    # the softmax variant it presumably replaced.
    # return np.exp(np.log(X+eps)/tau)/np.sum(np.exp(np.log(X+eps)/tau), axis=1)[:, np.newaxis]
    X_temp = np.clip(X / tau, -32, 32)   # prevent overflow in np.exp
    return 1 / (1 + np.exp(X_temp))
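Whatever the intended name, clipping the logits to [-32, 32] is a standard guard against overflow in np.exp; a quick demonstration:

import numpy as np

x = np.array([-1000.0, 0.0, 1000.0])
# np.exp(1000.0) overflows to inf with a warning; clipping first keeps the
# logistic well-defined, merely saturated near 1 or 0.
safe = 1.0 / (1.0 + np.exp(np.clip(x, -32, 32)))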
Example #10
 def invert(self, data, input=None, mask=None, tag=None):
     yhat = self.link(np.clip(data, .1, np.inf))
     return self._invert(yhat, input=input, mask=mask, tag=tag)
Example #11
 def _log_hazard(self, params, T, X):
     hz = self._hazard(params, T, X)
     hz = np.clip(hz, 1e-20, np.inf)
     return np.log(hz)
Example #12
 def l2_normalize(x):
     #  x / sqrt(max(sum(x**2), epsilon))
     return x / np.sqrt(
         np.clip(np.sum(x**2, axis=-1, keepdims=True), EPSILON, None))
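The clip floor matters exactly when a row is all zeros: without it the division would produce NaNs. A sketch with an assumed EPSILON:

import numpy as np

EPSILON = 1e-12
x = np.array([[3.0, 4.0], [0.0, 0.0]])
normed = x / np.sqrt(np.clip(np.sum(x**2, axis=-1, keepdims=True), EPSILON, None))
# First row becomes the unit vector [0.6, 0.8]; the zero row stays zero
# instead of turning into NaN.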
Example #13
def binary_crossentropy(actual, predicted):
    """二分类logloss
    """
    predicted = np.clip(predicted, EPS, 1 - EPS)
    return -np.mean(actual * np.log(predicted) +
                    (1 - actual) * np.log(1 - predicted))
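Why the clip is needed, in one short sketch (the EPS value is assumed): a confidently wrong prediction of exactly 1.0 would otherwise send np.log to -inf.

import numpy as np

EPS = 1e-15
actual = np.array([1.0, 0.0])
predicted = np.array([1.0, 1.0])      # the second prediction is confidently wrong
clipped = np.clip(predicted, EPS, 1 - EPS)
loss = -np.mean(actual * np.log(clipped) + (1 - actual) * np.log(1 - clipped))
# Finite (~17.3); without the clip, log(1 - 1.0) = -inf makes the loss inf.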
Example #14
def evaluate_gradient(
    gradient,
    objectPoints,
    P,
    image_pts_measured,
    normalize=False,
):
    coords = extract_objectpoints_vars(objectPoints)  # x1, y1, ..., x4, y4
    names = (['x{}'.format(i) for i in range(1, 5)] +
             ['y{}'.format(i) for i in range(1, 5)])

    # Remember the previous evaluations (needed by supersab below).
    for name in names:
        setattr(gradient, 'd{}_eval_old'.format(name),
                getattr(gradient, 'd{}_eval'.format(name)))

    # Evaluate each partial derivative, scaled by its per-parameter rate.
    for name in names:
        deriv = getattr(gradient, 'd{}'.format(name))
        rate = getattr(gradient, 'n_{}'.format(name))
        setattr(gradient, 'd{}_eval'.format(name),
                deriv(*coords, P, image_pts_measured, normalize) * rate)

    # Adapt each per-parameter rate from the sign of successive gradients.
    for name in names:
        setattr(gradient, 'n_{}'.format(name),
                supersab(getattr(gradient, 'n_{}'.format(name)),
                         getattr(gradient, 'd{}_eval'.format(name)),
                         getattr(gradient, 'd{}_eval_old'.format(name)),
                         gradient.n_pos, gradient.n_neg))

    ## Limit the step size
    limit = 0.05
    for name in names:
        setattr(gradient, 'd{}_eval'.format(name),
                np.clip(getattr(gradient, 'd{}_eval'.format(name)),
                        -limit, limit))

    return gradient
Example #15
def binary_crossentropy(y_true, y_pred):
    y_pred = np.clip(y_pred, EPSILON, 1 - EPSILON)
    return -np.mean(
        y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred), axis=-1)
Example #16
 def _cumulative_hazard(self, params, times):
     lambda_, rho_ = params
     return np.exp(
         rho_ * (np.log(np.clip(times, 1e-25, np.inf)) - np.log(lambda_)))
Example #17
 def fun(x):
     return np.clip(x, a_min=0.1, a_max=1.1)
Example #18
 def noise(self, x=None, u=None):
     # state and action are clipped to their valid boxes, but the noise
     # covariance itself is constant, so the clipped values go unused
     u = np.clip(u, -self._umax, self._umax)
     x = np.clip(x, -self._xmax, self._xmax)
     return self._sigma
Example #19
def clip(x, c, axis=None):
    return (x.T / (np.linalg.norm(x, axis=axis) + 1e-9) *
            np.clip(np.linalg.norm(x, axis=axis), 0, c)).T
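A usage sketch, assuming the clip above is in scope: rows whose norm exceeds c are rescaled onto the ball of radius c, while shorter rows pass through (up to the 1e-9 stabilizer).

import numpy as np

x = np.array([[3.0, 4.0], [0.3, 0.4]])   # row norms 5.0 and 0.5
clipped = clip(x, 1.0, axis=1)
print(np.linalg.norm(clipped, axis=1))   # ~[1.0, 0.5]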
Example #20
def binary_crossentropy(actual, predicted):
    predicted = np.clip(predicted, EPS, 1 - EPS)
    return np.mean(-np.sum(actual * np.log(predicted) +
                           (1 - actual) * np.log(1 - predicted)))
Example #21
def logloss(actual, predicted):
    predicted = np.clip(predicted, EPS, 1 - EPS)
    loss = -np.sum(actual * np.log(predicted))
    return loss / float(actual.shape[0])
Example #22
def reconstruct_ptychography(fname,
                             probe_pos,
                             probe_size,
                             obj_size,
                             theta_st=0,
                             theta_end=PI,
                             theta_downsample=None,
                             n_epochs='auto',
                             crit_conv_rate=0.03,
                             max_nepochs=200,
                             alpha=1e-7,
                             alpha_d=None,
                             alpha_b=None,
                             gamma=1e-6,
                             learning_rate=1.0,
                             output_folder=None,
                             minibatch_size=None,
                             save_intermediate=False,
                             full_intermediate=False,
                             energy_ev=5000,
                             psize_cm=1e-7,
                             cpu_only=False,
                             save_path='.',
                             phantom_path='phantom',
                             core_parallelization=True,
                             free_prop_cm=None,
                             multiscale_level=1,
                             n_epoch_final_pass=None,
                             initial_guess=None,
                             n_batch_per_update=1,
                             dynamic_rate=True,
                             probe_type='gaussian',
                             probe_initial=None,
                             probe_learning_rate=1e-3,
                             pupil_function=None,
                             probe_circ_mask=0.9,
                             finite_support_mask=None,
                             forward_algorithm='fresnel',
                             dynamic_dropping=True,
                             dropping_threshold=8e-5,
                             n_dp_batch=20,
                             object_type='normal',
                             **kwargs):
    def calculate_loss(obj_delta, obj_beta, this_i_theta, this_pos_batch,
                       this_prj_batch):

        obj_stack = np.stack([obj_delta, obj_beta], axis=3)
        obj_rot = apply_rotation(
            obj_stack, coord_ls[this_i_theta],
            'arrsize_{}_{}_{}_ntheta_{}'.format(*this_obj_size, n_theta))
        probe_pos_batch_ls = []
        exiting_ls = []
        i_dp = 0
        while i_dp < minibatch_size:
            probe_pos_batch_ls.append(
                this_pos_batch[i_dp:min([i_dp + n_dp_batch, minibatch_size])])
            i_dp += n_dp_batch

        # pad if needed
        pad_arr = np.array([[0, 0], [0, 0]])
        if probe_pos[:, 0].min() - probe_size_half[0] < 0:
            pad_len = probe_size_half[0] - probe_pos[:, 0].min()
            obj_rot = np.pad(obj_rot, ((pad_len, 0), (0, 0), (0, 0), (0, 0)),
                             mode='constant')
            pad_arr[0, 0] = pad_len
        if probe_pos[:, 0].max() + probe_size_half[0] > this_obj_size[0]:
            pad_len = probe_pos[:, 0].max(
            ) + probe_size_half[0] - this_obj_size[0]
            obj_rot = np.pad(obj_rot, ((0, pad_len), (0, 0), (0, 0), (0, 0)),
                             mode='constant')
            pad_arr[0, 1] = pad_len
        if probe_pos[:, 1].min() - probe_size_half[1] < 0:
            pad_len = probe_size_half[1] - probe_pos[:, 1].min()
            obj_rot = np.pad(obj_rot, ((0, 0), (pad_len, 0), (0, 0), (0, 0)),
                             mode='constant')
            pad_arr[1, 0] = pad_len
        if probe_pos[:, 1].max() + probe_size_half[1] > this_obj_size[1]:
            pad_len = probe_pos[:, 1].max(
            ) + probe_size_half[1] - this_obj_size[1]
            obj_rot = np.pad(obj_rot, ((0, 0), (0, pad_len), (0, 0), (0, 0)),
                             mode='constant')
            pad_arr[1, 1] = pad_len

        for k, pos_batch in enumerate(probe_pos_batch_ls):
            subobj_ls = []
            for j in range(len(pos_batch)):
                pos = pos_batch[j]
                pos = [int(x) for x in pos]
                pos[0] = pos[0] + pad_arr[0, 0]
                pos[1] = pos[1] + pad_arr[1, 0]
                subobj = obj_rot[pos[0] - probe_size_half[0]:pos[0] -
                                 probe_size_half[0] + probe_size[0],
                                 pos[1] - probe_size_half[1]:pos[1] -
                                 probe_size_half[1] + probe_size[1], :, :]
                subobj_ls.append(subobj)

            subobj_ls = np.stack(subobj_ls)
            exiting = multislice_propagate_cnn(subobj_ls[:, :, :, :, 0],
                                               subobj_ls[:, :, :, :, 1],
                                               probe_real,
                                               probe_imag,
                                               energy_ev,
                                               [psize_cm * ds_level] * 3,
                                               free_prop_cm='inf')
            exiting_ls.append(exiting)
        exiting_ls = np.concatenate(exiting_ls, 0)
        loss = np.mean((np.abs(exiting_ls) - np.abs(this_prj_batch))**2)

        return loss

    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    t_zero = time.time()

    # read data
    t0 = time.time()
    print_flush('Reading data...', designate_rank=0, this_rank=rank)
    f = h5py.File(os.path.join(save_path, fname), 'r')
    prj = f['exchange/data']
    n_theta = prj.shape[0]
    prj_theta_ind = np.arange(n_theta, dtype=int)
    theta = -np.linspace(theta_st, theta_end, n_theta, dtype='float32')
    if theta_downsample is not None:
        theta = theta[::theta_downsample]
        prj_theta_ind = prj_theta_ind[::theta_downsample]
        n_theta = len(theta)
    original_shape = [n_theta, *prj.shape[1:]]
    print_flush('Data reading: {} s'.format(time.time() - t0),
                designate_rank=0,
                this_rank=rank)
    print_flush('Data shape: {}'.format(original_shape),
                designate_rank=0,
                this_rank=rank)
    comm.Barrier()

    not_first_level = False

    if output_folder is None:
        output_folder = 'recon_ptycho_minibatch_{}_' \
                        'iter_{}_' \
                        'alphad_{}_' \
                        'alphab_{}_' \
                        'rate_{}_' \
                        'energy_{}_' \
                        'size_{}_' \
                        'ntheta_{}_' \
                        'ms_{}_' \
                        'cpu_{}' \
            .format(minibatch_size,
                    n_epochs, alpha_d, alpha_b,
                    learning_rate, energy_ev,
                    prj.shape[-1], prj.shape[0],
                    multiscale_level, cpu_only)
        if abs(PI - theta_end) < 1e-3:
            output_folder += '_180'
    print_flush('Output folder is {}'.format(output_folder),
                designate_rank=0,
                this_rank=rank)

    if save_path != '.':
        output_folder = os.path.join(save_path, output_folder)

    for ds_level in range(multiscale_level - 1, -1, -1):

        ds_level = 2**ds_level
        print_flush('Multiscale downsampling level: {}'.format(ds_level),
                    designate_rank=0,
                    this_rank=rank)
        comm.Barrier()

        n_pos = len(probe_pos)
        probe_pos = np.array(probe_pos)
        probe_size_half = (np.array(probe_size) / 2).astype('int')
        prj_shape = original_shape
        if ds_level > 1:
            this_obj_size = [int(x / ds_level) for x in obj_size]
        else:
            this_obj_size = obj_size

        dim_y, dim_x = prj_shape[-2:]
        comm.Barrier()

        # read rotation data
        try:
            coord_ls = read_all_origin_coords(
                'arrsize_{}_{}_{}_ntheta_{}'.format(*this_obj_size, n_theta),
                n_theta)
        except:
            if rank == 0:
                print_flush('Saving rotation coordinates...',
                            designate_rank=0,
                            this_rank=rank)
                save_rotation_lookup(this_obj_size, n_theta)
            comm.Barrier()
            coord_ls = read_all_origin_coords(
                'arrsize_{}_{}_{}_ntheta_{}'.format(*this_obj_size, n_theta),
                n_theta)

        if minibatch_size is None:
            minibatch_size = n_theta

        # unify random seed for all threads
        comm.Barrier()
        seed = int(time.time() / 60)
        np.random.seed(seed)
        comm.Barrier()

        if rank == 0:
            if not not_first_level:
                if initial_guess is None:
                    print_flush('Initializing with Gaussian random.',
                                designate_rank=0,
                                this_rank=rank)
                    obj_delta = np.random.normal(size=this_obj_size,
                                                 loc=8.7e-7,
                                                 scale=1e-7)
                    obj_beta = np.random.normal(size=this_obj_size,
                                                loc=5.1e-8,
                                                scale=1e-8)
                    obj_delta[obj_delta < 0] = 0
                    obj_beta[obj_beta < 0] = 0
                else:
                    print_flush('Using supplied initial guess.',
                                designate_rank=0,
                                this_rank=rank)
                    sys.stdout.flush()
                    obj_delta = initial_guess[0]
                    obj_beta = initial_guess[1]
            else:
                print_flush('Initializing with upsampled result of '
                            'the previous level.',
                            designate_rank=0,
                            this_rank=rank)
                obj_delta = dxchange.read_tiff(
                    os.path.join(output_folder,
                                 'delta_ds_{}.tiff'.format(ds_level * 2)))
                obj_beta = dxchange.read_tiff(
                    os.path.join(output_folder,
                                 'beta_ds_{}.tiff'.format(ds_level * 2)))
                obj_delta = upsample_2x(obj_delta)
                obj_beta = upsample_2x(obj_beta)
                obj_delta += np.random.normal(size=this_obj_size,
                                              loc=8.7e-7,
                                              scale=1e-7)
                obj_beta += np.random.normal(size=this_obj_size,
                                             loc=5.1e-8,
                                             scale=1e-8)
                obj_delta[obj_delta < 0] = 0
                obj_beta[obj_beta < 0] = 0
            if object_type == 'phase_only':
                obj_beta[...] = 0
            elif object_type == 'absorption_only':
                obj_delta[...] = 0
            np.save('init_delta_temp.npy', obj_delta)
            np.save('init_beta_temp.npy', obj_beta)
        comm.Barrier()
        if rank != 0:
            obj_delta = np.zeros(this_obj_size)
            obj_beta = np.zeros(this_obj_size)
            obj_delta[:, :, :] = np.load('init_delta_temp.npy')
            obj_beta[:, :, :] = np.load('init_beta_temp.npy')
        comm.Barrier()
        if rank == 0:
            os.remove('init_delta_temp.npy')
            os.remove('init_beta_temp.npy')
        comm.Barrier()

        print_flush('Initializing probe...', designate_rank=0, this_rank=rank)
        if probe_type == 'gaussian':
            probe_mag_sigma = kwargs['probe_mag_sigma']
            probe_phase_sigma = kwargs['probe_phase_sigma']
            probe_phase_max = kwargs['probe_phase_max']
            py = np.arange(probe_size[0]) - (probe_size[0] - 1.) / 2
            px = np.arange(probe_size[1]) - (probe_size[1] - 1.) / 2
            pxx, pyy = np.meshgrid(px, py)
            probe_mag = np.exp(-(pxx**2 + pyy**2) / (2 * probe_mag_sigma**2))
            probe_phase = probe_phase_max * np.exp(-(pxx**2 + pyy**2) /
                                                   (2 * probe_phase_sigma**2))
            probe_real, probe_imag = mag_phase_to_real_imag(
                probe_mag, probe_phase)
        elif probe_type == 'optimizable':
            if probe_initial is not None:
                probe_mag, probe_phase = probe_initial
                probe_real, probe_imag = mag_phase_to_real_imag(
                    probe_mag, probe_phase)
            else:
                back_prop_cm = (free_prop_cm + (psize_cm * obj_delta.shape[2])
                                ) if free_prop_cm is not None else (
                                    psize_cm * obj_delta.shape[2])
                probe_init = create_probe_initial_guess(
                    os.path.join(save_path, fname), back_prop_cm * 1.e7,
                    energy_ev, psize_cm * 1.e7)
                probe_real = probe_init.real
                probe_imag = probe_init.imag
            if pupil_function is not None:
                probe_real = probe_real * pupil_function
                probe_imag = probe_imag * pupil_function
        elif probe_type == 'fixed':
            probe_mag, probe_phase = probe_initial
            probe_real, probe_imag = mag_phase_to_real_imag(
                probe_mag, probe_phase)
        else:
            raise ValueError(
                'Invalid probe type. Choose from \'gaussian\', \'fixed\', \'optimizable\'.'
            )

        # generate Fresnel kernel
        voxel_nm = np.array([psize_cm] * 3) * 1.e7 * ds_level
        lmbda_nm = 1240. / energy_ev
        delta_nm = voxel_nm[-1]
        h = get_kernel(delta_nm, lmbda_nm, voxel_nm, probe_size)

        loss_grad = grad(calculate_loss, [0, 1])

        print_flush('Optimizer started.', designate_rank=0, this_rank=rank)
        if rank == 0:
            create_summary(output_folder, locals(), preset='ptycho')

        cont = True
        i_epoch = 0
        while cont:
            n_pos = len(probe_pos)
            n_spots = n_theta * n_pos
            n_tot_per_batch = minibatch_size * size
            n_batch = int(np.ceil(float(n_spots) / n_tot_per_batch))

            m, v = (None, None)
            t0 = time.time()
            spots_ls = range(n_spots)
            ind_list_rand = []
            t00 = time.time()
            print_flush('Allocating jobs over threads...',
                        designate_rank=0,
                        this_rank=rank)
            # Make a list of all thetas and spot positions
            theta_ls = np.arange(n_theta)
            np.random.shuffle(theta_ls)
            for i, i_theta in enumerate(theta_ls):
                spots_ls = range(n_pos)
                if n_pos % minibatch_size != 0:
                    spots_ls = np.append(
                        spots_ls,
                        np.random.choice(spots_ls[:-(n_pos % minibatch_size)],
                                         minibatch_size -
                                         (n_pos % minibatch_size),
                                         replace=False))
                if i == 0:
                    ind_list_rand = np.vstack(
                        [np.array([i_theta] * len(spots_ls)),
                         spots_ls]).transpose()
                else:
                    ind_list_rand = np.concatenate([
                        ind_list_rand,
                        np.vstack([
                            np.array([i_theta] * len(spots_ls)), spots_ls
                        ]).transpose()
                    ],
                                                   axis=0)
            ind_list_rand = split_tasks(ind_list_rand, n_tot_per_batch)
            print_flush('Allocation done in {} s.'.format(time.time() - t00),
                        designate_rank=0,
                        this_rank=rank)

            for i_batch in range(n_batch):

                t00 = time.time()
                if len(ind_list_rand[i_batch]) < n_tot_per_batch:
                    n_supp = n_tot_per_batch - len(ind_list_rand[i_batch])
                    ind_list_rand[i_batch] = np.concatenate(
                        [ind_list_rand[i_batch], ind_list_rand[0][:n_supp]])

                this_ind_batch = ind_list_rand[i_batch]
                this_i_theta = this_ind_batch[rank * minibatch_size, 0]
                this_ind_rank = np.sort(
                    this_ind_batch[rank * minibatch_size:(rank + 1) *
                                   minibatch_size, 1])

                this_prj_batch = prj[this_i_theta, this_ind_rank]
                this_pos_batch = probe_pos[this_ind_rank]
                if ds_level > 1:
                    this_prj_batch = this_prj_batch[:, :, ::ds_level, ::
                                                    ds_level]
                comm.Barrier()
                grads = loss_grad(obj_delta, obj_beta, this_i_theta,
                                  this_pos_batch, this_prj_batch)
                this_grads = np.array(grads)
                grads = np.zeros_like(this_grads)
                comm.Barrier()
                comm.Allreduce(this_grads, grads)
                grads = grads / size
                (obj_delta, obj_beta), m, v = apply_gradient_adam(
                    np.array([obj_delta, obj_beta]),
                    grads,
                    i_batch,
                    m,
                    v,
                    step_size=learning_rate)
                obj_delta = np.clip(obj_delta, 0, None)
                obj_beta = np.clip(obj_beta, 0, None)
                if rank == 0:
                    dxchange.write_tiff(obj_delta,
                                        fname=os.path.join(
                                            output_folder, 'intermediate',
                                            'current'),
                                        dtype='float32',
                                        overwrite=True)
                comm.Barrier()
                print_flush('Minibatch done in {} s (rank {})'.format(
                    time.time() - t00, rank))

            if n_epochs == 'auto':
                pass
            else:
                if i_epoch == n_epochs - 1: cont = False

            if dynamic_dropping:
                print_flush('Dynamic dropping...', 0, rank)
                this_loss_table = np.zeros(n_pos)
                loss_table = np.zeros(n_pos)
                fill_start = 0
                ind_list = np.arange(n_pos)
                if n_pos % size != 0:
                    ind_list = np.append(ind_list,
                                         np.zeros(size - (n_pos % size)))
                while fill_start < n_pos:
                    this_ind_rank = ind_list[fill_start + rank:fill_start +
                                             rank + 1]
                    this_prj_batch = prj[0, this_ind_rank]
                    this_pos_batch = probe_pos[this_ind_rank]
                    this_loss = calculate_loss(obj_delta, obj_beta, 0,
                                               this_pos_batch, this_prj_batch)
                    this_loss_table[fill_start + rank] = this_loss
                    fill_start += size
                comm.Allreduce(this_loss_table, loss_table)
                loss_table = loss_table[:n_pos]
                drop_ls = np.where(loss_table < dropping_threshold)[0]
                probe_pos = np.delete(probe_pos, drop_ls, axis=0)
                print_flush('Dropped {} spot positions.'.format(len(drop_ls)),
                            0, rank)

            i_epoch = i_epoch + 1

            # if i_epoch == 1:
            #    dxchange.write_tiff(obj_delta, os.path.join(output_folder, 'debug', 'rank_{}'.format(rank)), dtype='float32')

            this_loss = calculate_loss(obj_delta, obj_beta, this_i_theta,
                                       this_pos_batch, this_prj_batch)
            average_loss = 0
            print_flush(
                'Epoch {} (rank {}); loss = {}; Delta-t = {} s; current time = {} s.'
                .format(i_epoch, rank, this_loss,
                        time.time() - t0,
                        time.time() - t_zero))
            # print_flush(
            # 'Average loss = {}.'.format(comm.Allreduce(this_loss, average_loss)))
            if rank == 0:
                dxchange.write_tiff(obj_delta,
                                    fname=os.path.join(
                                        output_folder,
                                        'delta_ds_{}'.format(ds_level)),
                                    dtype='float32',
                                    overwrite=True)
                dxchange.write_tiff(obj_beta,
                                    fname=os.path.join(
                                        output_folder,
                                        'beta_ds_{}'.format(ds_level)),
                                    dtype='float32',
                                    overwrite=True)
            print_flush('Current iteration finished.',
                        designate_rank=0,
                        this_rank=rank)
        comm.Barrier()
        not_first_level = True   # later levels resume from this level's result
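The np.clip calls after each Adam update above act as a projection onto the physical constraint that delta and beta stay nonnegative. The same pattern in isolation, on a toy quadratic (the loss and step size are made up):

import numpy as np

target = np.array([-1.0, 0.3, 0.7])
x = np.array([0.5, -0.2, 1.0])
for _ in range(200):
    grad = 2 * (x - target)        # gradient of the toy loss ||x - target||^2
    x = x - 0.05 * grad            # unconstrained gradient step
    x = np.clip(x, 0, None)        # project back onto x >= 0
# x converges to [0.0, 0.3, 0.7], the nonnegative minimizer.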
Example #23
def categorical_crossentropy(y_true, y_pred):
    y_pred = y_pred / np.sum(y_pred, axis=y_pred.ndim - 1, keepdims=True)
    y_pred = np.clip(y_pred, EPSILON, 1 - EPSILON)
    return -np.mean(y_true * np.log(y_pred), axis=y_pred.ndim - 1)
Example #24
 def noise(self, x=None, u=None):
     _u = np.clip(u, -self.ulim, self.ulim)
     _x = np.clip(x, -self.xlim, self.xlim)
     return self.sigma
Example #25
def kullback_leibler_divergence(y_true, y_pred):
    y_true = np.clip(y_true, EPSILON, 1)
    y_pred = np.clip(y_pred, EPSILON, 1)
    return np.sum(y_true * np.log(y_true / y_pred), axis=-1)
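Clipping both arguments keeps the ratio and the log finite when either distribution assigns zero mass to a bin; a sketch with an assumed EPSILON:

import numpy as np

EPSILON = 1e-7
p = np.array([0.5, 0.5, 0.0])
q = np.array([0.4, 0.4, 0.2])
p_c, q_c = np.clip(p, EPSILON, 1), np.clip(q, EPSILON, 1)
kl = np.sum(p_c * np.log(p_c / q_c), axis=-1)
# Finite despite the zero entry in p; unclipped, 0 * log(0 / 0.2) evaluates to NaN.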
Example #26
    def dynamics(self, x, u):
        # adapted from: https://github.com/JoeMWatson/input-inference-for-control/
        # model reference:
        # http://www.lirmm.fr/~chemori/Temp/Wafa/double%20pendule%20inverse.pdf
        # state layout: x = [x, th1, th2, dx, dth1, dth2]
        _u = np.clip(u, -self.ulim, self.ulim)

        g = 9.81
        Mc = 0.37
        Mp1 = 0.127
        Mp2 = 0.127
        Mt = Mc + Mp1 + Mp2
        L1 = 0.3365
        L2 = 0.3365
        l1 = L1 / 2.
        l2 = L2 / 2.
        J1 = Mp1 * L1 / 12.
        J2 = Mp2 * L2 / 12.

        def f(x, u):

            q = x[0]
            th1 = x[1]
            th2 = x[2]
            dq = x[3]
            dth1 = x[4]
            dth2 = x[5]

            s1 = np.sin(th1)
            c1 = np.cos(th1)
            s2 = np.sin(th2)
            c2 = np.cos(th2)
            sdth = np.sin(th1 - th2)
            cdth = np.cos(th1 - th2)

            # helpers
            l1_mp1_mp2 = Mp1 * l1 + Mp2 * L2
            l1_mp1_mp2_c1 = l1_mp1_mp2 * c1
            Mp2_l2 = Mp2 * l2
            Mp2_l2_c2 = Mp2_l2 * c2
            l1_l2_Mp2 = L1 * l2 * Mp2
            l1_l2_Mp2_cdth = l1_l2_Mp2 * cdth

            # inertia
            M11 = Mt
            M12 = l1_mp1_mp2_c1
            M13 = Mp2_l2_c2
            M21 = l1_mp1_mp2_c1
            M22 = (l1**2) * Mp1 + (L1**2) * Mp2 + J1
            M23 = l1_l2_Mp2_cdth
            M31 = Mp2_l2_c2
            M32 = l1_l2_Mp2_cdth
            M33 = (l2**2) * Mp2 + J2

            # Coriolis
            C11 = 0.
            C12 = -l1_mp1_mp2 * dth1 * s1
            C13 = -Mp2_l2 * dth2 * s2
            C21 = 0.
            C22 = 0.
            C23 = l1_l2_Mp2 * dth2 * sdth
            C31 = 0.
            C32 = -l1_l2_Mp2 * dth1 * sdth
            C33 = 0.

            # gravity
            G11 = 0.
            G21 = -(Mp1 * l1 + Mp2 * L1) * g * s1
            G31 = -Mp2 * l2 * g * s2

            # make matrices
            M = np.vstack((np.hstack(
                (M11, M12, M13)), np.hstack(
                    (M21, M22, M23)), np.hstack((M31, M32, M33))))
            C = np.vstack((np.hstack(
                (C11, C12, C13)), np.hstack(
                    (C21, C22, C23)), np.hstack((C31, C32, C33))))
            G = np.vstack((G11, G21, G31))

            action = np.vstack((u, 0.0, 0.0))

            M_inv = np.linalg.inv(M)
            C_dx = np.dot(C, x[3:].reshape((-1, 1)))
            ddx = np.dot(M_inv, action - C_dx - G).squeeze()

            return np.hstack((dq, dth1, dth2, ddx))

        k1 = f(x, _u)
        k2 = f(x + 0.5 * self.dt * k1, _u)
        k3 = f(x + 0.5 * self.dt * k2, _u)
        k4 = f(x + self.dt * k3, _u)

        xn = x + self.dt / 6. * (k1 + 2. * k2 + 2. * k3 + k4)
        xn = np.clip(xn, -self.xlim, self.xlim)

        return xn
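The k1 through k4 stages above form one classical fourth-order Runge-Kutta step. The same scheme on a one-dimensional toy ODE (the function and step size are made up):

import numpy as np

def rk4_step(f, x, dt):
    # weighted average of four slope samples, exactly as in dynamics above
    k1 = f(x)
    k2 = f(x + 0.5 * dt * k1)
    k3 = f(x + 0.5 * dt * k2)
    k4 = f(x + dt * k3)
    return x + dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)

x = 1.0
for _ in range(100):
    x = rk4_step(lambda x: -x, x, 0.01)   # integrate dx/dt = -x to t = 1
print(x, np.exp(-1.0))                    # both ~0.36788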
Example #27
 def fun(x):
     return to_scalar(np.clip(x, a_min=0.1, a_max=1.1))
Example #28
def log_loss(y, pred):
    # element-wise binary cross-entropy; the clip keeps np.log finite
    this_pred = np.clip(pred, EPS, 1 - EPS)
    return -(y * np.log(this_pred) + (1 - y) * np.log(1 - this_pred))
Example #29
def categorical_crossentropy(actual, predicted):
    """多分类logloss,要先OneHotEncoder
    """
    predicted = np.clip(predicted, EPS, 1 - EPS)
    loss = -np.sum(actual * np.log(predicted))
    return loss / float(actual.shape[0])
Example #30
    #####################################################################
    # fit star => galaxy proposal distributions
    #
    #   re  - [0, infty], transformation log
    #   ab  - [0, 1], transformation log (ab / (1 - ab))
    #   phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    import CelestePy.util.data as du
    from sklearn.linear_model import LinearRegression
    coadd_df = du.load_celeste_dataframe("../../data/stripe_82_dataset/coadd_catalog_from_casjobs.fit")

    # make star => radial extent proposal
    star_res = coadd_df.gal_arcsec_scale[ coadd_df.is_star ].values
    star_res = np.clip(star_res, 1e-8, np.inf)
    star_res_proposal = fit_mog(np.log(star_res).reshape((-1,1)), max_comps = 20, mog_class = MixtureOfGaussians)
    with open('star_res_proposal.pkl', 'wb') as f:
        pickle.dump(star_res_proposal, f)

    if False:
        xgrid = np.linspace(np.min(np.log(star_res)), np.max(np.log(star_res)), 100)
        lpdf  = star_res_proposal.logpdf(xgrid.reshape((-1,1)))
        plt.plot(xgrid, np.exp(lpdf))
        plt.hist(np.log(star_res), 25, normed=True, alpha=.24)
        plt.hist(star_res_proposal.rvs(684).flatten(), 25, normed=True, alpha=.24)

    # make star fluxes => gal fluxes for stars
    colors    = ['ug', 'gr', 'ri', 'iz']
    star_mags = np.array([du.colors_to_mags(r, c) 
Example #31
 def initialize(self, datas, inputs=None, masks=None, tags=None):
     datas = [
         interpolate_data(data, mask) for data, mask in zip(datas, masks)
     ]
     yhats = [self.link(np.clip(d, .1, np.inf)) for d in datas]
     self._initialize_with_pca(yhats, inputs=inputs, masks=masks, tags=tags)
Example #32
env_sigma = env.unwrapped.sigma

state = np.zeros((dm_state, nb_steps + 1))
action = np.zeros((dm_act, nb_steps))

state[:, 0] = env.reset()
for t in range(nb_steps):
    solver = MBGPS(env,
                   init_state=tuple([state[:, t], env_sigma]),
                   init_action_sigma=100.,
                   nb_steps=horizon,
                   kl_bound=5.)
    trace = solver.run(nb_iter=10, verbose=False)

    _act = solver.ctl.sample(state[:, t], 0, stoch=False)
    action[:, t] = np.clip(_act, -env.ulim, env.ulim)
    state[:, t + 1], _, _, _ = env.step(action[:, t])

    print('Time Step:', t, 'Cost:', trace[-1])

plt.figure()

plt.subplot(3, 1, 1)
plt.plot(state[0, :], '-b')
plt.subplot(3, 1, 2)
plt.plot(state[1, :], '-b')

plt.subplot(3, 1, 3)
plt.plot(action[0, :], '-g')

plt.show()
Example #33
def r_from_s(s, epsilon=1.e-6):
    return np.clip((1. - s + epsilon) / (s + epsilon), epsilon, None)
Example #34
def s_from_r(r):
    return np.clip(1. / (1. + r), 0., 1.)
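The two conversions above are near-inverses of each other; a quick roundtrip check (assumes both functions are in scope):

import numpy as np

s = np.linspace(0.01, 0.99, 5)
roundtrip = s_from_r(r_from_s(s))
print(np.max(np.abs(roundtrip - s)))   # tiny, on the order of the 1e-6 epsilon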
Example #35
def mean_squared_logarithmic_error(y_true, y_pred):
    first = np.log(np.clip(y_pred, EPSILON, None) + 1.)
    second = np.log(np.clip(y_true, EPSILON, None) + 1.)
    return np.mean((first - second)**2, axis=-1)
Example #36
 def _cumulative_hazard(self, params, times):
     alpha_, beta_ = params
     return np.logaddexp(beta_ * (np.log(np.clip(times, 1e-25, np.inf)) - np.log(alpha_)), 0)
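This is the log-logistic cumulative hazard H(t) = log(1 + (t / alpha)^beta), written with np.logaddexp for numerical stability; a quick check against the direct form (parameter values made up):

import numpy as np

alpha_, beta_ = 2.0, 3.0
times = np.array([0.5, 1.0, 4.0])
stable = np.logaddexp(beta_ * (np.log(times) - np.log(alpha_)), 0)
direct = np.log(1 + (times / alpha_) ** beta_)
print(np.allclose(stable, direct))   # True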
Example #37
def evaluate_gradient(gradient, objectPoints, P, normalize=False):
    coords = extract_objectpoints_vars(objectPoints)  # x1, y1, ..., x8, y8
    names = (['x{}'.format(i) for i in range(1, 9)] +
             ['y{}'.format(i) for i in range(1, 9)])

    # Remember the previous evaluations (needed by supersab below).
    for name in names:
        setattr(gradient, 'd{}_eval_old'.format(name),
                getattr(gradient, 'd{}_eval'.format(name)))

    # Evaluate each partial derivative, scaled by its per-parameter rate.
    for name in names:
        deriv = getattr(gradient, 'd{}'.format(name))
        rate = getattr(gradient, 'n_{}'.format(name))
        setattr(gradient, 'd{}_eval'.format(name),
                deriv(*coords, P, normalize) * rate)

    ## Limit the step size (here before adapting the rates, unlike Example #14)
    limit = 0.01
    for name in names:
        setattr(gradient, 'd{}_eval'.format(name),
                np.clip(getattr(gradient, 'd{}_eval'.format(name)),
                        -limit, limit))

    # Adapt each per-parameter rate from the sign of successive gradients.
    for name in names:
        setattr(gradient, 'n_{}'.format(name),
                supersab(getattr(gradient, 'n_{}'.format(name)),
                         getattr(gradient, 'd{}_eval'.format(name)),
                         getattr(gradient, 'd{}_eval_old'.format(name)),
                         gradient.n_pos, gradient.n_neg))

    return gradient
Example #38
 def fun(x): return to_scalar(np.clip(x, a_min=0.1, a_max=1.1))
 d_fun = lambda x : to_scalar(grad(fun)(x))
Example #39
def mean_absolute_percentage_error(y_true, y_pred):
    diff = np.abs((y_true - y_pred) / np.clip(y_true, EPSILON, None))
    return 100. * np.mean(diff, axis=-1)
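The clip floor on y_true guards the division when a true value is exactly zero; a sketch with an assumed EPSILON:

import numpy as np

EPSILON = 1e-7
y_true = np.array([0.0, 100.0])
y_pred = np.array([1.0, 110.0])
diff = np.abs((y_true - y_pred) / np.clip(y_true, EPSILON, None))
# Without the clip the first entry divides by zero; with it the error is
# merely huge (1e7) and the mean stays finite.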