Example #1
    def readGeo(self):
        '''
        Read out the *_geom.tab file, in which the columns are:
        col, time, lat, lon, mrad, srad, rvel, tvel, sza, phase
        '''
        self.geoinfo = []
        geofile = self.datapath + '/s_' + self.trackID + '_geom.tab'
        try:
            with open(geofile,'r') as f:
                for line in f:
                    line = line.replace('\r\n', '').split(',')
                    # read lat, lon, Hmars (mrad), Hsat (srad); radii km -> m
                    self.geoinfo.append([float(line[2]), float(line[3]),
                                         float(line[4]) * 1000,
                                         float(line[5]) * 1000])

            self.geoinfo = np.asarray(self.geoinfo)
            # two-way delay time between the satellite and the Mars areoid
            time = 2 * (self.geoinfo[:, 3] - self.geoinfo[:, 2]) / self.C
            time = time.reshape((len(time),1))
            self.geoinfo = np.concatenate((self.geoinfo,time),axis = 1)
            # optionally keep lat and lon at the end of self.geoinfo
            # before they are replaced by map x/y below
            #self.geoinfo = np.concatenate((self.geoinfo, self.geoinfo[:,:2]), axis = 1)

            # transform from Mars 2000 lat/lon to MOLA spherical projection map coordinates x/y
            ptsx, ptsy = pj.transform(self.mars2000ll, self.molaeqc,
                                      self.geoinfo[:, 1], self.geoinfo[:, 0])
            self.geoinfo[:,0] = ptsx
            self.geoinfo[:,1] = ptsy
        except Exception:
            self.geoinfo = None
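
Note: pj.transform is the deprecated pyproj 1.x API. Below is a minimal sketch of the same lat/lon to map x/y step with the pyproj 2+ Transformer interface; the CRS strings are illustrative assumptions, not the exact mars2000ll/molaeqc definitions used by readGeo:

from pyproj import Transformer

# Placeholder CRS strings (assumptions, not the projections used above):
mars2000ll = "+proj=longlat +a=3396190 +b=3376200 +no_defs"
molaeqc = "+proj=eqc +R=3396000 +no_defs"

lons, lats = [0.0, 10.0], [-5.0, 5.0]
transformer = Transformer.from_crs(mars2000ll, molaeqc, always_xy=True)
ptsx, ptsy = transformer.transform(lons, lats)  # always_xy keeps (lon, lat) order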
Example #2
def get_era_winds(m, rawdatapath, start_year, end_year, start_month,  end_month, xyres):

	wind_data = Dataset(rawdatapath+'/WINDS/ERA/DATA/ERAI_WINDS_MONTHLY_1979-2014.nc', 'r')
	lats = wind_data.variables['latitude'][::-1]
	lons = wind_data.variables['longitude'][:]
	time = wind_data.variables['time'][:] / (24. * 30.)
	time = time - time[0]
	time = time.reshape(time.shape[0] // 12, 12)
	u10 = wind_data.variables['u10'][:, ::-1, :]
	v10 = wind_data.variables['v10'][:, ::-1, :]
	u10 = u10.reshape(u10.shape[0] // 12, 12, u10.shape[1], u10.shape[2])
	v10 = v10.reshape(v10.shape[0] // 12, 12, v10.shape[1], v10.shape[2])

	u10_winter_mean = np.mean(u10[start_year-1979:end_year-1979+1, start_month:end_month+1], axis=(0, 1))
	v10_winter_mean = np.mean(v10[start_year-1979:end_year-1979+1, start_month:end_month+1], axis=(0, 1))


	u10_winter_meanS, lonsS = shiftgrid(180.,u10_winter_mean,lons,start=False)
	v10_winter_meanS, lonsS = shiftgrid(180.,v10_winter_mean,lons,start=False)

	u10_winter_meanSC, lonsSC = addcyclic(u10_winter_meanS, lonsS)
	v10_winter_meanSC, lonsSC = addcyclic(v10_winter_meanS, lonsS)

	xvel,yvel,xptsW,yptsW = m.transform_vector(u10_winter_meanSC,v10_winter_meanSC,lonsSC,lats,xyres,xyres,returnxy=True,masked=True)
	wind_speed = sqrt((xvel**2) + (yvel**2))
	return xptsW, yptsW, xvel, yvel, wind_speed
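
The key idiom above is reshaping a flat monthly series into a (years, 12) grid so a block of months can be averaged over years and months in one call. A minimal sketch with synthetic data:

import numpy as np

monthly = np.arange(36, dtype=float)                 # 3 years of monthly values
by_year = monthly.reshape(monthly.shape[0] // 12, 12)
winter_mean = by_year[:, 0:3].mean(axis=(0, 1))      # average months 0-2 over all years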
Example #3
def propagateSignal(sig, time, fs, freq=None, tone=None):
    # to handle single scalar time shift
    if not isinstance(time, np.ndarray):
        time = np.array([time])

    # to handle 1-D input
    if sig.ndim == 1:
        sig = sig.reshape(
            (1, -1))  # automatic 2-d row vector detection using -1

    # generate a tone if no tone is passed in and a freqshift is desired
    if freq is not None and tone is None:
        print('Generating tone for freq shift..')
        tone = np.exp(1j * 2 * np.pi * freq * np.arange(sig.shape[1]) / fs)

    # propagate the signal in time
    sigfft = np.fft.fft(sig)  # this automatically ffts each row
    sigFreq = makeFreq(sig.shape[1], fs).reshape(
        (1, sig.shape[1]))  # construct 2-d, row vector
    mat = np.exp(
        1j * 2 * np.pi * sigFreq * -time.reshape((len(time), 1))
    )  # construct 2d matrix for each row having its own time shift
    preifft = mat * sigfft
    result = np.fft.ifft(preifft)

    # no freq shift, just return
    if tone is None:
        return result
    # otherwise return the freqshifted version with the tone
    else:
        print('Returning shifted signal + tone used.')
        return result * tone, tone
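
propagateSignal applies the delay as a frequency-domain phase ramp, exp(-j 2π f τ). For an integer-sample delay this is exactly a circular shift, which gives a quick sanity check; the sketch below assumes makeFreq(n, fs) is equivalent to np.fft.fftfreq(n, d=1/fs):

import numpy as np

fs, n, k = 100.0, 64, 5
sig = np.random.randn(n)
freqs = np.fft.fftfreq(n, d=1.0 / fs)  # stand-in for makeFreq(n, fs)
shifted = np.fft.ifft(np.fft.fft(sig) * np.exp(1j * 2 * np.pi * freqs * -(k / fs)))
assert np.allclose(shifted.real, np.roll(sig, k))  # delay by k samples == circular shift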
Example #4
    def rolling_max_slope(data, base=0.00006, action='buy'):
        if action == 'buy':
            is_buy = True
            is_sell = False
        elif action == 'sell':
            is_buy = False
            is_sell = True
        else:
            raise ValueError("action must be 'buy' or 'sell'")

        l_ = len(data)
        tmp = data[:, :, 0]
        if is_buy:
            tmp1 = torch.zeros(tmp.shape)
        elif is_sell:
            tmp1 = torch.ones(tmp.shape)*100
        ret = torch.cat((tmp, tmp1), 0)
        ret = ret.unfold(0, l_, 1)
        ret = ret[:-1]
        print(ret.shape)

        if is_buy:
            diff = ret[1:] - ret[0] - base
        if is_sell:
            diff = ret[0] - ret[1:] - base

        time = torch.arange(1, len(diff) + 1)
        time = time.reshape(-1, 1, 1)

        slope = diff/time.float()
        max_slope, index = torch.max(slope, dim=0)
        time = index.float()+1
        max_slope = max_slope/time
        max_slope = max_slope.permute(1, 0)
        max_slope = max_slope.unsqueeze(2)
        data = torch.cat((data, max_slope), 2)
        return data
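
The heavy lifting is done by unfold(0, l_, 1), which turns the padded series into overlapping length-l_ windows with stride 1 along dimension 0. A tiny demonstration:

import torch

x = torch.arange(6.)
print(x.unfold(0, 4, 1))
# tensor([[0., 1., 2., 3.],
#         [1., 2., 3., 4.],
#         [2., 3., 4., 5.]])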
Example #5
    def _prepare_input(self):
        if self.uinput is None:
            uinput = 0.0
        else:
            time = numpy.r_[0.0:self.simulation_length:self.integrator.dt]
            self.stimulus.configure_time(time.reshape((1, -1)))
            uinput = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))

        return uinput
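
numpy.r_ with slice syntax builds the simulation time grid the same way numpy.arange(start, stop, step) would:

import numpy

print(numpy.r_[0.0:1.0:0.25])  # [0.   0.25 0.5  0.75], i.e. numpy.arange(0.0, 1.0, 0.25)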
Example #6
def run_epoch(session, m, data, time, eval_op, state=None):
    """Runs the model on the given data."""
    x = data.reshape((1, 1, input_dim+types))
    t = time.reshape((1, 1, 1))
    prob, _state, _ = session.run([m._prob, m.final_state, eval_op],
                         {m.input_data: x,
                          # m.input_time: t,
                          m.initial_state: state})
    return prob, _state
Example #7
 def _prepare_stimulus(self):
     if self.stimulus is None:
         stimulus = 0.0
     else:
         time = numpy.r_[0.0:self.simulation_length:self.integrator.dt]
         self.stimulus.configure_time(time.reshape((1, -1)))
         stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
         self.log.debug("stimulus shape is: %s", stimulus.shape)
     return stimulus
Example #8
    def get_ksens_at_res_time(self, ksens, time_array, res_time):
        '''
        Helper function that takes the full time history of kinetic
        sensitivities and returns the data at the time step at which
        the residence time occurs, using linear interpolation if needed.

        Parameters
        ----------
        ksens : numpy array
            Three dimensional numpy array that contains kinetic sensitivities.
        time_array : pandas series
            Time column of time history pandas data frame.
        res_time : float
            Residence time value.

        Returns
        -------
        ksens_array : numpy array
            kinetic sensitivity array where all times but the residence time
            have been removed.
        temp_arrays : numpy array
            Variable for testing.

        '''
        ksens_array = []
        temp_arrays = []
        for sheet in range(ksens.shape[2]):
            temp = ksens[:, :, sheet]
            time = time_array.values
            time = time.reshape((time.shape[0], 1))
            temp_with_time = np.hstack((time, temp))
            df = pd.DataFrame(copy.deepcopy(temp_with_time))
            df = df.rename(columns={0: 'time'})
            temp_arrays.append(df)
            df.loc[-1, 'time'] = float(res_time)

            df = df.sort_values('time').reset_index(drop=True)

            df = df.interpolate()
            res_time_k_sens_data = df.iloc[(df['time'] -
                                            res_time).abs().argsort()[:1]]
            res_time_k_sens_data = res_time_k_sens_data.reset_index(drop=True)
            res_time_k_sens_data = res_time_k_sens_data.drop(columns="time")
            res_time_k_sens_data = res_time_k_sens_data.to_numpy()

            res_time_k_sens_data = res_time_k_sens_data.reshape(
                (res_time_k_sens_data.shape[0], res_time_k_sens_data.shape[1],
                 1))
            ksens_array.append(res_time_k_sens_data)

        ksens_array = np.dstack(ksens_array)

        return ksens_array, temp_arrays
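
The core idiom is inserting a row that carries only the residence time (all other columns NaN), sorting by time, and letting DataFrame.interpolate fill the new row. A compact sketch:

import pandas as pd

df = pd.DataFrame({'time': [0.0, 1.0, 2.0], 'k': [10.0, 20.0, 40.0]})
res_time = 1.5
df.loc[-1, 'time'] = res_time                       # new row; 'k' is NaN there
df = df.sort_values('time').reset_index(drop=True)
df = df.interpolate()                               # fills k = 30.0 at t = 1.5
row = df.iloc[(df['time'] - res_time).abs().argsort()[:1]]

Note that the default method='linear' treats rows as equally spaced; method='index' (with time as the index) would interpolate on the actual time values.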
Example #9
def parse_datafiles(targetfile, binno, outdir):

    item = targetfile
    # for item in filelist:
    f = open(item, 'r')
    a = f.readlines()

    binnumber = 1024

    counter = 0

    spectra = np.zeros((0, binnumber))
    timetracker = 0
    energy_deposited = []
    for i in range(len(a)):
        b = a[i].strip()
        b_parsed = b.split(',')
        event_time = int(b_parsed[0])
        energy_deposited += [float(b_parsed[1])]

        timetracker += event_time

        # print(timetracker)

        if timetracker >= 1E6:
            timetracker = 0
            source_id = 0
            counts, energy_edges = np.histogram(energy_deposited,
                                                bins=binnumber,
                                                range=(0.0, 3000.0))
            spectra = np.vstack((spectra, counts))
            counter += 1
            energy_deposited = []
        # if counter >= 100:
        #     break
    # print(np.sum(spectra[0, :]))
    time = np.linspace(0, counter, counter)
    time = time.reshape((time.shape[0], 1))
    # print(time.shape, spectra.shape)
    tosave = np.hstack((time, spectra))

    f.close()
    head, tail = os.path.split(item)
    print(tail, spectra.shape)
    # f = open(os.path.join('./integrations', tail), 'w')
    # np.savetxt(f, tosave, delimiter=',')
    # f.close()
    np.save(os.path.join(outdir, tail[:-4] + '.npy'), tosave)
    return
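
Each accumulated slice of roughly 1E6 time units is collapsed into a fixed 1024-bin spectrum. Passing an explicit range to np.histogram keeps the bin edges identical across slices, which is what makes the np.vstack rows comparable:

import numpy as np

events = [12.0, 480.5, 2999.0]  # energies in one time slice
counts, edges = np.histogram(events, bins=1024, range=(0.0, 3000.0))
# counts.shape == (1024,) and edges are the same for every slice,
# so slices can be stacked into an (n_slices, 1024) array.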
Example #10
def full_benchmark(num_env,
                   num_traj,
                   experiment_type,
                   save=True,
                   config='default',
                   report=True,
                   params=None,
                   system='acrobot_obs',
                   traj_id_offset=1800):

    sr = np.zeros((num_env, num_traj))
    time = np.zeros((num_env, num_traj))
    costs = np.zeros((num_env, num_traj))

    experiment_func = importlib.import_module(
        ".{}_exp".format(experiment_type), package="experiments").experiment

    for env_id in range(num_env):
        for traj_id in range(num_traj):
            result = experiment_func(env_id,
                                     traj_id + traj_id_offset,
                                     params=params,
                                     system=system)
            sr[env_id, traj_id] = result['successful']
            if result['successful']:
                time[env_id, traj_id] = result['planning_time']
                costs[env_id, traj_id] = result['costs']
            if save:
                Path("results/cpp_full/{}/{}/".format(system, config)).mkdir(
                    parents=True, exist_ok=True)
                np.save(
                    'results/cpp_full/{}/{}/sr_{}_{}.npy'.format(
                        system, config, num_env, num_traj), sr)
                np.save(
                    'results/cpp_full/{}/{}/time_{}_{}.npy'.format(
                        system, config, num_env, num_traj), time)
                np.save(
                    'results/cpp_full/{}/{}/costs_{}_{}.npy'.format(
                        system, config, num_env, num_traj), costs)
            if report:
                sr_list = sr.reshape(-1)[:(num_traj * env_id + traj_id + 1)]
                mask = sr_list > 0
                print("sr:{}\ttime:{}\tcosts:{}".format(
                    sr_list.mean(),
                    time.reshape(-1)[:(num_traj * env_id + traj_id +
                                       1)][mask].mean(),
                    costs.reshape(-1)[:(num_traj * env_id + traj_id +
                                        1)][mask].mean()))
Example #11
def full_benchmark(num_env,
                   num_traj,
                   save=True,
                   config='default',
                   report=True,
                   params_module=None,
                   system='acrobot_obs'):
    sr = np.zeros((num_env, num_traj))
    time = np.zeros((num_env, num_traj))
    costs = np.zeros((num_env, num_traj))

    for env_id in range(num_env):
        for traj_id in range(num_traj):
            result = experiment(env_id,
                                traj_id,
                                params_module=params_module,
                                system=system)
            sr[env_id, traj_id] = result['successful']
            if result['successful']:
                time[env_id, traj_id] = result['planning_time']
                costs[env_id, traj_id] = result['costs']
            if save:
                Path("results/cpp_full/{}/{}/".format(config, system)).mkdir(
                    parents=True, exist_ok=True)
                np.save(
                    'results/cpp_full/{}/{}/sr_{}_{}.npy'.format(
                        config, system, num_env, num_traj), sr)
                np.save(
                    'results/cpp_full/{}/{}/time_{}_{}.npy'.format(
                        config, system, num_env, num_traj), time)
                np.save(
                    'results/cpp_full/{}/{}/costs_{}_{}.npy'.format(
                        config, system, num_env, num_traj), costs)
            if report:
                print("sr:{}\ttime:{}\tcosts:{}".format(
                    sr.reshape(-1)[:(num_traj * env_id + traj_id + 1)].mean(),
                    time.reshape(-1)[:(num_traj * env_id + traj_id +
                                       1)].mean(),
                    costs.reshape(-1)[:(num_traj * env_id + traj_id +
                                        1)].mean(),
                ))
Example #12
def mag(xyz: np.ndarray) -> np.ndarray:
    time = xyz[:, 0]
    L2_mag = (xyz[:, -3]**2 + xyz[:, -2]**2 + xyz[:, -1]**2)**(0.5)
    return np.hstack((time.reshape(-1, 1), L2_mag.reshape(-1, 1)))
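
Usage sketch, with the function above in scope: one row whose last three columns form a 1-2-2 vector has L2 magnitude 3:

import numpy as np

xyz = np.array([[0.5, 1.0, 2.0, 2.0]])  # [time, x, y, z]
print(mag(xyz))                          # [[0.5 3. ]]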
Example #13
    def __init__(self,
                 data,
                 delta,
                 lambd,
                 nonsnps=0,
                 time=None,
                 seed=16962,
                 sigma='power',
                 TType=torch.DoubleTensor):
        """
        data: a Tensor.
        delta: indicator for right censoring (1 if uncensored, 0 if censored)
        lambd: regularization parameter
        """
        self.rank = dist.get_rank()
        self.size = dist.get_world_size()
        self.seed = seed
        torch.manual_seed(seed)

        self.TType = TType

        self.n, self.p = data.shape
        n, p = self.n, self.p
        print(n, p)

        self.data = data.type(TType)
        self.delta = delta.type(TType)
        self.delta_dist = distmat.dist_data(self.delta, TType=TType)
        self.prev_obj = -inf

        self.beta = distmat.dist_data(torch.zeros((p, 1)).type(TType),
                                      TType=TType)
        self.beta_prev = distmat.dist_data(torch.zeros((p, 1)).type(TType),
                                           TType=TType)

        self.datat = self.data.t()

        if sigma == 'power':
            l2 = self.power()
            self.sigma = (1 / (2 * (l2**2))).item()
        elif sigma == 'quicknorm':
            b = self.spectral_upper_bdd()
            self.sigma = (1 / (2 * (b**2))).item()
        else:
            self.sigma = sigma

        print(self.sigma)
        self.lambd = lambd
        self.nonsnps = nonsnps
        self.soft_threshold_ftn = torch.nn.Softshrink(lambd)

        if time is None:
            time = -torch.arange(0, n).view(-1, 1)

        self.breslow_ind = torch.tensor(breslow_ind(time.cpu().numpy())).to(
            dtype=torch.int64, device=self.beta.chunk.device)

        time_local = time.reshape(-1, 1).type(TType)
        time_dist = distmat.dist_data(time, TType=self.TType)
        #r_local = torch.arange(0, n).view(-1, 1).type(TType)
        #r_dist = distmat.dist_data(r_local, TType=self.TType)
        self.pi_ind = (time_dist - time_local.t() >= 0).type(TType)
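
The last line builds the Cox at-risk indicator by broadcasting a column of times against its transpose: entry (i, j) is 1 exactly when time[i] >= time[j]. A plain-torch sketch of the same broadcast, without the distmat distribution:

import torch

t = torch.tensor([3.0, 1.0, 2.0]).view(-1, 1)
pi_ind = (t - t.t() >= 0).double()
# pi_ind[i, j] == 1 when time[i] >= time[j]:
# tensor([[1., 1., 1.],
#         [0., 1., 0.],
#         [0., 1., 1.]])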
Example #14
File: ILC.py Project: TFLQW/ILC
     ddq1 = (A[0, 0] * dq1 + A[0, 1] * dq2 + Z[0] + D2[0, 0] *
             (-Ta[0, 0] + tol1) + D2[0, 1] * (-Ta[1, 0] + tol2))[0]
     ddq2 = (A[1, 0] * dq1 + A[1, 1] * dq2 + Z[1] + D2[1, 0] *
             (-Ta[0, 0] + tol1) + D2[1, 1] * (-Ta[1, 0] + tol2))[0]
     dq1 = dq1 + ddq1 * dts
     dq2 = dq2 + ddq2 * dts
     q1 = q1 + dq1 * dts
     q2 = q2 + dq2 * dts
     q[t, :] = [q1, q2]
     dq[t, :] = [dq1, dq2]
 # q_ex.append(q)
 # dq_ex.append(dq)
 # ax = fig.add_subplot(2, 1, 1)
 e_ex[i, :] = np.mean(e, axis=0)
 ax1[0].plot(time.reshape(time.shape[0], 1),
             q[:, 0],
             color=seaborn.xkcd_rgb[color_map[i]])
 ax1[0].plot(time.reshape(time.shape[0], 1),
             q1d,
             color='k',
             linestyle='--')
 # ax = fig.add_subplot(2, 1, 2)
 ax1[1].plot(time.reshape(time.shape[0], 1),
             q[:, 1],
             color=seaborn.xkcd_rgb[color_map[i]])
 ax1[1].plot(time.reshape(time.shape[0], 1),
             q2d,
             color='k',
             linestyle='--')
 fig1.canvas.flush_events()
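
The loop is a plain explicit-Euler integration: velocity is updated from acceleration, then position from velocity, with a fixed step dts. In isolation, with a stand-in constant acceleration instead of the manipulator dynamics:

import numpy as np

dts, n = 0.001, 1000
q = np.zeros(n)
dq, ddq = 0.0, -9.81   # stand-in acceleration; the ILC loop recomputes ddq each step
for t in range(1, n):
    dq = dq + ddq * dts
    q[t] = q[t - 1] + dq * dts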
Example #15
def parse_datafiles(targetfile, binno):

    # energy = np.linspace(0, 3000, 1024)

    # filelist = ['/home/holiestcow/Documents/zephyr/datasets/muse/trainingData/109798.csv']

    # process all files.
    # filelist = glob.glob('/home/holiestcow/Documents/zephyr/datasets/muse/trainingData/*.csv')

    item = targetfile
    # for item in filelist:
    f = open(item, 'r')
    a = f.readlines()

    binnumber = 1024

    counter = 0

    spectra = np.zeros((0, binnumber))
    timetracker = 0
    energy_deposited = []
    for i in range(len(a)):
        b = a[i].strip()
        b_parsed = b.split(',')
        event_time = int(b_parsed[0])
        energy_deposited += [float(b_parsed[1])]

        timetracker += event_time

        # print(timetracker)

        if timetracker >= 1E6:
            timetracker = 0
            source_id = 0
            counts, energy_edges = np.histogram(energy_deposited,
                                                bins=binnumber,
                                                range=(0.0, 4000.0))
            spectra = np.vstack((spectra, counts))
            counter += 1
            energy_deposited = []  # reset events for the next time slice
        # if counter >= 100:
        #     break
    # print(np.sum(spectra[0, :]))
    time = np.linspace(0, counter, counter)
    time = time.reshape((time.shape[0], 1))
    # print(time.shape, spectra.shape)
    tosave = np.hstack((time, spectra))

    f.close()
    head, tail = os.path.split(item)
    print(tail, spectra.shape)
    # f = open(os.path.join('./integrations', tail), 'w')
    # np.savetxt(f, tosave, delimiter=',')
    # f.close()
    np.save(os.path.join('./integrations', tail[:-4] + '.npy'), tosave)

    # features = nscrad_rebin(spectra, 64)
    # features_bg = features[:30, :]
    # nuisance = nscrad_get_nuisance()
    # # energy = energy_edges[1:]
    # nuisance_matrix = np.zeros((0, binno))
    # for key in nuisance:
    #     fig = plt.figure()
    #     plt.plot(nuisance[key]['energy'], nuisance[key]['counts'])
    #     fig.savefig('nuisance_{}.png'.format(key))
    #     print([0] + nuisance[key]['energy'])
    #     print(len(nuisance[key]['energy']), len(nuisance[key]['counts']))
    #
    #     out = np.array(rebin(np.array([0] + nuisance[key]['energy']), np.array(nuisance[key]['counts']),
    #                 energy_edges))
    #     out = out.reshape((1, len(out)))
    #     nuisance_matrix = np.vstack((nuisance_matrix, out))
    #
    # nuisance_matrix = nscrad_rebin(nuisance_matrix, 64)
    # nuisance_matrix = nuisance_matrix.reshape((nuisance_matrix.shape[1], nuisance_matrix.shape[0]))
    # bullshit = nscrad_get_parameters(features, features_bg, nuisance_matrix)
    return
Example #16
    def fit(self, data_set, test_set):
        self._sess.run(tf.global_variables_initializer())
        data_set.epoch_completed = 0

        for c in tf.trainable_variables(self._name):
            print(c.name)

        print("acc\tauc\tepoch\tloss\tloss_diff\tcount")
        logged = set()
        loss = 0
        count = 0
        while data_set.epoch_completed < self._epochs:
            dynamic_features, time, labels = data_set.next_batch(
                self._batch_size)
            self._sess.run(self._train_op,
                           feed_dict={
                               self._x: dynamic_features,
                               self._y: labels,
                               self._t: time.reshape(-1, dynamic_features.shape[1], 1)
                           })
            # self._sess.run(self._train_op,
            #                feed_dict={self._x: dynamic_features[:, :, 1:],
            #                           self._y: labels})

            if data_set.epoch_completed % self._output_n_epoch == 0 and data_set.epoch_completed not in logged:
                logged.add(data_set.epoch_completed)
                loss_prev = loss
                loss = self._sess.run(self._loss,
                                      feed_dict={
                                          self._x: data_set.dynamic_features,
                                          self._y: data_set.labels,
                                          self._t: data_set.time.reshape(
                                              -1, dynamic_features.shape[1], 1)
                                      })

                # loss = self._sess.run(self._loss,
                #                       feed_dict={self._x: data_set.dynamic_features[:, :,1:].reshape(-1, self._time_steps,self._num_features),
                #                                  self._y: data_set.labels})
                loss_diff = loss_prev - loss
                y_score = self.predict(test_set)
                y_score = y_score.reshape([-1])
                test_labels = test_set.labels
                test_labels = test_labels.reshape([-1])
                auc = roc_auc_score(test_labels, y_score)

                print("{}\t{}\t{}\t{}\t{}\t{}".format(
                    data_set.epoch_completed, auc, loss, loss_diff, count,
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                # early-stopping criterion
                if loss > self._max_loss:
                    count = 0
                else:
                    if loss_diff > self._max_pace:
                        count = 0
                    else:
                        count += 1
                if count > 9:
                    break
        # save_path = self._save.save(self._sess, self._name + "model/save_net" +time.strftime("%m-%d-%H-%M-%S", time.localtime()) + ".ckpt")
        # t = time.localtime()
        t = datetime.datetime.now().strftime("%m-%d-%H-%M-%S")
        save_path = self._save.save(
            self._sess, self._name + "model/save_net" + t + ".ckpt")
        print("Save to path: ", save_path)
Example #17
                    'd3_dot': str(joint_vel[2, i])
                })

            rate.sleep()

print('Export Complete.')
print('--------------------------------------')

fig, axs = plt.subplots(2, 2)
fig.suptitle('Kinematic Trajectory Data')
axs[0, 0].plot(planned_time.reshape(N, 1), joint_pos[0, :].reshape(N, 1),
               planned_time.reshape(N, 1), joint_pos[1, :].reshape(N, 1),
               planned_time.reshape(N, 1), joint_pos[2, :].reshape(N, 1))
axs[0, 0].set_title('Planned Joint Positions')
axs[0, 1].plot(time.reshape(data_points, 1), measured_joint_pos[0, :].reshape(data_points, 1),
               time.reshape(data_points, 1), measured_joint_pos[1, :].reshape(data_points, 1),
               time.reshape(data_points, 1), measured_joint_pos[2, :].reshape(data_points, 1))
axs[0, 1].set_title('Measured Joint Positions')
axs[1, 0].plot(time.reshape(data_points, 1), measured_joint_eff[0, :].reshape(data_points, 1),
               time.reshape(data_points, 1), measured_joint_eff[1, :].reshape(data_points, 1),
               time.reshape(data_points, 1), measured_joint_eff[2, :].reshape(data_points, 1))
axs[1, 0].set_title('Measured Joint Efforts')

plt.show()
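
Matplotlib accepts 1-D arrays directly, so the repeated time.reshape(data_points, 1) calls are not needed; each panel can also be drawn with a short loop, sketched here with the same variable names as above:

for j in range(3):
    axs[0, 1].plot(time, measured_joint_pos[j, :])
axs[0, 1].set_title('Measured Joint Positions')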