Example #1
import getopt
import sys


def main(argv):
    def help():
        print "Usage: plotter -t <timestamp>"
    timestamp = ''
    try:
        opts, args = getopt.getopt(argv, "ht:", ["timestamp="])
    except getopt.GetoptError:
        help()
        sys.exit(2)
    if len(opts) == 0:
        help()
    else:
        for opt, arg in opts:
            if opt == '-h':
                help()
                sys.exit()
            elif opt in ("-t", "--timestamp"):
                timestamp = arg
            else:
                help()
                sys.exit()
        conf = read_config()
        logger.info("Plotting timestamp {0}".format(timestamp))
        plot_all(
            conf.get('directories', 'sessions'), 
            conf.get('directories', 'images'), 
            timestamp)
        logger.info("Finished plotting.")
Example #2
def main():
    DATASET_DIR = './bigdata_datasets'
    datasets = load_data.get_datasets(DATASET_DIR)
    fixed_d = non_zero_fix(datasets)
    fixed_d = interpolation_fix(fixed_d)
    fixed_d = huge_change_fix(fixed_d)
    plotter.plot_all(fixed_d)
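The three fix-up passes all take a dataset and return a dataset, so the chain can also be written as a loop. A sketch of the same pipeline (the clean() wrapper is hypothetical; the fix functions are the ones called above):

def clean(datasets):
    # Hypothetical helper composing the cleanup passes from the example above.
    for fix in (non_zero_fix, interpolation_fix, huge_change_fix):
        datasets = fix(datasets)
    return datasets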
Example #3
        'variety_counts': [name[1] for name in sorted(variety)],
    }
    total_types = float(np.array(data_types['variety_counts']).sum())
    # Scale to percent before rounding so the printed value has no float artifacts.
    data_types['percentage'] = [str(round(name[1] / total_types * 100, 2)) + "%"
                                for name in sorted(variety)]
    return data_country, data_types


def data_years(wine_cellar_dictionary):
    vintage = []
    for element in wine_cellar_dictionary:
        vintage.append(wine_cellar_dictionary[element][2])
    # Replace NaN vintages with 0 once, after the loop, instead of on every iteration.
    vintage_cleaned = list(np.nan_to_num(vintage))
    vintage = list(set((x, vintage_cleaned.count(x)) for x in vintage_cleaned))
    vintage_dic = {
        'vintage_years': [str(name[0]) for name in sorted(vintage)],
        'vintage_count': [name[1] for name in sorted(vintage)],
    }
    total = float(np.array(vintage_dic['vintage_count']).sum())
    # Scale to percent before rounding so the printed value has no float artifacts.
    vintage_dic['percentage'] = [str(round(name[1] / total * 100, 2)) + "%"
                                 for name in sorted(vintage)]
    return vintage_dic


wineDictionary = pre_process_the_data(df)
# geoResults = geo_coding(wineDictionary)
geoResults = data
# check_geo_consistency(wineDictionary, geoResults)
data_map = location(wineDictionary, geoResults)
# Store the result under a new name so the data_charts function is not shadowed.
charts = data_charts(wineDictionary)
data_vintage = data_years(wineDictionary)
plotter.plot_all(charts[0], charts[1], data_vintage, data_map)
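The (value, count) pairs in both tallies are built by calling list.count inside a comprehension, which is quadratic in the number of entries. collections.Counter produces the same pairs in one pass; a stdlib alternative, not the original author's code:

from collections import Counter

# Same sorted (year, count) pairs as list(set(...)) with .count(), in a single pass.
vintage = sorted(Counter(vintage_cleaned).items())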
Example #4
def main(is_train, prediction, plotting, scaling, selected_model):
    if len(sys.argv) < 3:
        print(LINESPLIT)
        print("Usage: python3 {} <path_to_ssn_datafile> <path_to_aa_datafile>".
              format(os.path.basename(__file__)))
        data_file = "data/SILSO/TSN/SN_m_tot_V2.0.txt"
        aa_file = "data/ISGI/aa_1869-01-01_2020-12-19_D.dat"
    else:
        data_file = sys.argv[1]
        aa_file = sys.argv[2]

    print(LINESPLIT)
    print("Code running on device: {}".format(device))

    ssn_data = datasets.SSN(data_file)
    aa_data = datasets.AA(aa_file)

    print(LINESPLIT)
    print('''Data loaded from file locations :
    SSN - {}
    AA - {}'''.format(os.path.abspath(data_file), os.path.abspath(aa_file)))

    if plotting:
        plotter.plot_all("combined_data1.jpg")

    cycle_data = ut.get_cycles(ssn_data)

    print(LINESPLIT)
    print("Solar cycle data loaded/saved as: cycle_data.pickle")
    print(LINESPLIT)
    ut.print_cycles(cycle_data)

    train_samples = datasets.Features(ssn_data, aa_data, cycle_data, normalize=scaling,
                                      start_cycle=13, end_cycle=22)
    valid_samples = datasets.Features(ssn_data, aa_data, cycle_data, normalize=scaling,
                                      start_cycle=23, end_cycle=23)
    valid_timestamps, _ = ut.gen_samples(ssn_data, aa_data, cycle_data, cycle=23,
                                         normalize=scaling, tf=cycle_data["length"][23])
    predn_timestamps, predn_samples = ut.gen_samples(ssn_data, aa_data, cycle_data,
                                                     cycle=24, normalize=scaling)

    print(LINESPLIT)
    print('''Selected data:
    Training: SC 13 to 22
    Validation: SC 23
    Prediction: SC 24''')

    ############ FFNN/RNN/LSTM (model chosen by user) ############

    model = getattr(models, selected_model)(inp_dim=6).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode="min",
                                                           factor=0.9,
                                                           verbose=True)

    print(LINESPLIT)
    print('''Selected model: {}
    Training mode: {}
    Prediction mode: {}'''.format(model, is_train, prediction))

    print(LINESPLIT)
    print("Selected optimizer: {}".format(optimizer))

    print(LINESPLIT)
    print('''Selected scheduler: {}(
    {})'''.format(scheduler.__class__.__name__, scheduler.state_dict()))

    pre_trained = load_model(model)

    if not pre_trained:
        if not is_train and prediction:
            print(LINESPLIT)
            print(
                "Warning: Prediction is ON with training OFF and no pretrained models available"
            )

    train_loader = DataLoader(dataset=train_samples,
                              batch_size=BATCH_SIZE,
                              shuffle=True)
    valid_loader = DataLoader(dataset=valid_samples,
                              batch_size=1,
                              shuffle=False)

    ### Training ###

    if is_train:
        if not pre_trained:
            model.train()
            print(LINESPLIT)
            print("Training model with solar cycle {} to {} data with: num_epochs={}".\
            format(datasets.START_CYCLE, datasets.END_CYCLE - 2, epochs))

            loss = train(model, train_loader, optimizer, scheduler, epochs)
            torch.save(
                model.state_dict(),
                "{}_{}_{}.pth".format(modelfolder, model.__class__.__name__,
                                      MAX_EPOCHS))

            plotter.plot_loss("Average Training Loss", range(len(loss)), loss, "tr_{}.png".\
            format(model.__class__.__name__))

            print(LINESPLIT)
            print('''Training finished successfully.
            Saved model checkpoints can be found in: {}
            Saved data/loss graphs can be found in: {}'''.format(
                modelfolder, graphfolder))

        else:
            print(LINESPLIT)
            print(
                "Skipping training, using pre-trained model for validation and prediction"
            )

        ### Validating ###

        model.eval()
        print(LINESPLIT)
        print("Validating model for solar cycle {} data".format(
            datasets.END_CYCLE - 1))

        valid_predictions, valid_loss = validate(model, valid_loader,
                                                 valid_timestamps)

        plotter.plot_predictions("SC{} Prediction".format(datasets.END_CYCLE - 1),\
        valid_timestamps, valid_predictions, "SC 23 Validation.png", compare=True)
        plotter.plot_loss("Validation Loss", range(len(valid_loss)), valid_loss, "val_{}.png".\
        format(model.__class__.__name__))

        print(LINESPLIT)
        print('''Validation finished successfully.
        Saved prediction/loss graphs can be found in: {}'''.format(graphfolder))

    ### Predicting ###

    if prediction:
        model.eval()
        print(LINESPLIT)
        print("Predicting SC {} using the above trained model".format(
            datasets.END_CYCLE))

        predn_predictions = predict(model, predn_samples, predn_timestamps)

        plotter.plot_predictions("SC{} Prediction".format(datasets.END_CYCLE),\
        predn_timestamps, predn_predictions, "SC 24 Prediction.png", compare=True)
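The getattr(models, selected_model) line resolves the model class from its string name at runtime, which is what lets a single CLI option choose between the FFNN/RNN/LSTM variants. A sketch of the same lookup with an explicit failure mode (the models module and its classes are assumed from the example):

import importlib

models = importlib.import_module("models")  # the project's models.py, per the example
try:
    model_cls = getattr(models, selected_model)
except AttributeError:
    raise SystemExit("Unknown model '{}'; expected a class in models.py".format(selected_model))
model = model_cls(inp_dim=6).to(device)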
Example #5
    def plan_path(self):
        self.flight_state = States.PLANNING
        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        # read lat0, lon0 from colliders into floating point values
        lat0, lon0 = load_lat_lon()

        # set home position to (lon0, lat0, 0)
        self.set_home_position(lon0, lat0, 0)
        # retrieve current global position
        glob_p = self.global_position
        # convert to current local position using global_to_local()
        loc_p = global_to_local(glob_p, self.global_home)

        print('global home {0}, position {1}, local position {2}'.format(
            self.global_home, self.global_position, self.local_position))
        # Read in obstacle map
        data = np.loadtxt('colliders.csv',
                          delimiter=',',
                          dtype=np.float64,
                          skiprows=2)

        # Define a grid for a particular altitude and safety margin around obstacles
        grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE,
                                                      SAFETY_DISTANCE)
        print("North offset = {0}, east offset = {1}".format(
            north_offset, east_offset))
        # Define starting point on the grid (this is just grid center)
        # convert start position to current position rather than map center
        grid_start = (-north_offset + int(loc_p[0]),
                      -east_offset + int(loc_p[1]))
        # Set goal as some arbitrary position on the grid
        # set goal as a latitude/longitude position (e.g. lat 37.792480, lon -122.397450)
        # and convert it into the local frame relative to global home
        loc_goal = global_to_local(self.glob_goal, self.global_home)
        grid_goal = (-north_offset + int(loc_goal[0]),
                     -east_offset + int(loc_goal[1]))
        # Run A* to find a path from start to goal
        # path, _ = a_star(grid, heuristic, grid_start, grid_goal)
        # add diagonal motions with a cost of sqrt(2) to your A* implementation
        # or move to a different search space such as a graph (not done here)
        print('Local Start and Goal: ', grid_start, grid_goal)
        skeleton = medial_axis(invert(grid))
        skel_start, skel_goal = find_start_goal(skeleton, grid_start,
                                                grid_goal)
        print(skel_start, skel_goal)
        # Run A* on the skeleton
        path, cost = a_star(
            invert(skeleton).astype(int), heuristic_func, tuple(skel_start),
            tuple(skel_goal))

        # prune path to minimize number of waypoints
        path = prune_path(path)

        plotter.plot_all(grid, grid_start, grid_goal, path, skeleton)

        # Convert path to waypoints
        waypoints = [[
            int(p[0]) + north_offset,
            int(p[1]) + east_offset, TARGET_ALTITUDE, 0
        ] for p in path]
        # Set self.waypoints
        self.waypoints = waypoints
        # send waypoints to sim (this is just for visualization of waypoints)
        self.connection.start()
        self.send_waypoints()
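Grid indices and local NED coordinates in this planner differ only by north_offset and east_offset, so the conversion is the same subtraction and addition in both directions. A sketch of that round trip, with local_north and local_east standing in for loc_p[0] and loc_p[1]:

# local NED position -> grid cell, as done for grid_start and grid_goal
cell = (int(local_north) - north_offset, int(local_east) - east_offset)

# grid cell -> waypoint in the local frame, as done when building waypoints
waypoint = [cell[0] + north_offset, cell[1] + east_offset, TARGET_ALTITUDE, 0]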
Example #6
    access_key = ''
    access_secret = ''
    user_handle = ''  # add an @ before the actual handle

    # Tweepy initialization
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    cursor = tweepy.Cursor(api.user_timeline, screen_name=user_handle, tweet_mode='extended').items()

    # key: tweet index (integer)
    # value: tweet object as provided by the twitter API
    results = {}
    for n, item in enumerate(cursor):
        results[n] = item._json
        print("Saving item number {}".format(n + 1))

    # The json file holds all raw information.
    with open('data_json.txt', 'w') as outfile:
        json.dump(results, outfile)

    # Convert select attributes from json to csv
    df = json_parser(results)
    df.to_csv('raw_data.csv')

    # Process dataframe, and create all pretty graphs
    df = data_parser(df)
    df.to_csv('data.csv')
    plot_all(df)
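Long user timelines can hit Twitter's rate limits partway through the crawl; tweepy can sleep through the reset window, and .items() takes an optional cap (the standard timeline endpoint returns at most roughly the last 3,200 tweets anyway). A variation on the initialization above:

api = tweepy.API(auth, wait_on_rate_limit=True)  # pause instead of raising on rate limits
cursor = tweepy.Cursor(api.user_timeline, screen_name=user_handle,
                       tweet_mode='extended').items(3200)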