Example #1
def get_weather_by_station(sno):
    # Refresh the parser's data, build the combined bike/weather DataFrame,
    # and keep only the rows for station `sno`
    par = data_parser.data_parser()
    par.update_current_data()
    bw_df = par.generate_bike_weather_data()
    bw_df_sno = bw_df[bw_df['sno'] == sno]

    # Keep only the weather observation columns
    bw_df_sno = bw_df_sno[[
        'WDIR', 'WDSE', 'TEMP', 'HUMD', 'PRES', 'H_24R', 'WSGust', 'WDGust',
        'UVI', 'PrecpHour', 'Visb'
    ]]

    return bw_df_sno
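
A minimal usage sketch, assuming the data_parser module shown above is importable and that the station id matches whatever type the parsed DataFrame stores in its 'sno' column (the value '0001' below is a placeholder):

station_weather = get_weather_by_station('0001')
print(station_weather[['TEMP', 'HUMD', 'PRES']].head())
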
Example #2
def track_particles(datapath, dataFormat, minLength, search_range):
    types = os.listdir(os.path.join(datapath, dataFormat))
    # Import and parse data
    untracked = [data_parser(os.path.join(datapath, dataFormat, type_), dataFormat) for type_ in types]
    print('Particles imported')
    # Track the particles
    tracked_particle = [tp.link_df(particles, search_range, memory=5).sort_values(by=['particle','frame'], ignore_index=True) 
                        for particles in untracked]
    nFiles = len(tracked_particle[0].frame.unique())
    del untracked
    # Remove small tracks and repair small holes (linear interpolation) and fill large holes with NaN
    tracked_repaired = [track_repair(track, 10, nFiles, minLength)
                        for track in tracked_particle]
    del tracked_particle
    # Save sorted, renumbered, and complete tracks
    tracked_complete = [[] for track in tracked_repaired]
    nParticles = [len(track.particle.unique()) for track in tracked_repaired]
    for i, track in enumerate(tracked_repaired):
        # Offset particle ids so they remain unique after concatenating all files
        tracked_complete[i] = renumber_particles(track, 'particle', int(np.sum(nParticles[:i])))
    tracked_complete = pd.concat(tracked_complete, ignore_index=True)
    tracked_complete = tracked_complete.sort_values(by=['frame', 'particle'], ignore_index=True)
    tracked_complete.to_pickle(os.path.join(datapath, 'tracked.pkl'))
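
A hedged invocation sketch, assuming trackpy, numpy, pandas, and the helpers used above (data_parser, track_repair, renumber_particles) are importable in the same module; the path, format, and parameter values are placeholders:

import os
import pandas as pd

track_particles(datapath='/data/experiment01', dataFormat='tif',
                minLength=50, search_range=15)
tracks = pd.read_pickle(os.path.join('/data/experiment01', 'tracked.pkl'))
print(tracks.groupby('particle').frame.count().describe())
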
Example #3
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    #tf.enable_eager_execution()
    with tf.Session() as sess:
        parser = argparse.ArgumentParser(description='')
        parser.add_argument(
            '--dataset_dir',
            dest='dataset_dir',
            default='/notebooks/dataset/GANs/Persian_miniature',
            help='path of the dataset')
        parser.add_argument('--max_depth',
                            dest='max_depth',
                            type=float,
                            default=1,
                            help='maximum depth')
        args = parser.parse_args()
        ds = data_parser(args=args)
        ds_train = ds.create_dataset()

        ds = ds_train.shuffle(buffer_size=1000)
        ds = ds.repeat()

        #ds = ds.batch(1)
        ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(1))
        ds = ds.prefetch(buffer_size=AUTOTUNE)
        # Create the one-shot training iterator (named to avoid shadowing the built-in iter)
        iterator = ds.make_one_shot_iterator()

        rgb = iterator.get_next()

        image_mean, image_var = tf.nn.moments(rgb, axes=[0, 1, 2])
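
In TF1 graph mode as used above, tf.nn.moments only adds nodes to the graph. A minimal sketch of fetching the actual statistics inside the same with tf.Session() block, assuming the dataset yields [batch, height, width, channels] tensors so the reduction over axes [0, 1, 2] gives per-channel values:

        # Evaluate the moment tensors; each sess.run pulls the next batch from the iterator
        mean_val, var_val = sess.run([image_mean, image_var])
        print('per-channel mean:', mean_val)
        print('per-channel variance:', var_val)
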
Example #4
        print("Nodes in the Ray cluster:")
        print(ray.nodes())

        from RCTMA_parallel import rctma
    else:
        from RCTMA_sequential import rctma

    IN_FILE = "InputDataC{}.txt".format(str(test_num))

    time1 = time.time()
    lbd, n0, particles = parameter_parser(nmax, IN_FILE)
    os.system("module load cmake")
    os.system("cmake external/CMakeLists.txt")
    os.system("(cd external && make)") 
    os.system("./external/external_hij")  
    data_parser(num, nmax)

    time2 = time.time()
    print("Time for preparing Hij is:", time2 - time1)

    T = rctma(num, nmax, lbd, n0, particles)
    time3 = time.time()
    print("Time for computing vscatter and rcmta matrices is:", time3 - time2)
 
    with open("external/data/J.npy", "rb") as f:
        J = pickle.load(f)
    Cext = oaec(lbd, n0, T, J)
    time4 = time.time()
    print("Cext:", Cext)
    print("Time for reading Jij and computing OAEC is :", time4 - time3)
Example #5
    def read_data(self):
        dp = data_parser(args=self.args)
        self.ds_train = dp.create_dataset()
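
A minimal context sketch, assuming this method belongs to a class that keeps the parsed command-line arguments in self.args (the class name below is hypothetical):

class Pipeline:
    def __init__(self, args):
        self.args = args
        self.ds_train = None

    def read_data(self):
        dp = data_parser(args=self.args)
        self.ds_train = dp.create_dataset()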