Example #1
    # Note: this snippet shows only __init__ of an nn.Module subclass;
    # create_map, ResBlock and Flatten are helpers defined elsewhere in the project.
    def __init__(self, num_modules=4, save_layer=False):
        super().__init__()
        self.num_modules = num_modules
        self.save_layer = save_layer
        self.cf_input = None

        # Create coordinate map to facilitate spatial reasoning
        self.coord_map = create_map((14, 14))

        # Create stem (1026 input channels: likely 1024 feature channels
        # plus the 2 coordinate channels)
        self.stem = nn.Sequential(
            nn.Conv2d(1026, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128), nn.ReLU(inplace=True))

        # Create FiLMed network body
        layers = []
        for i in range(num_modules):
            layers.append(ResBlock())
        self.filmed_blocks = nn.Sequential(*layers)

        # Create final classifier (130 input channels: 128 features plus
        # the 2 coordinate channels)
        self.classifier = nn.Sequential(nn.Conv2d(130, 512, kernel_size=1),
                                        nn.BatchNorm2d(512),
                                        nn.ReLU(inplace=True),
                                        nn.MaxPool2d(kernel_size=14),
                                        Flatten(), nn.Linear(512, 1024),
                                        nn.BatchNorm1d(1024),
                                        nn.ReLU(inplace=True),
                                        nn.Linear(1024, 32))
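
Here create_map builds a fixed coordinate map that gets concatenated onto the feature maps (hence the 1024 + 2 and 128 + 2 channel counts above). A minimal sketch of such a helper, assuming it returns two channels of x/y coordinates normalized to [-1, 1] as in FiLM-style architectures:

import torch

def create_map(shape):
    # Two channels holding x and y coordinates scaled to [-1, 1] (assumption).
    h, w = shape
    ys = torch.linspace(-1, 1, h).view(h, 1).expand(h, w)
    xs = torch.linspace(-1, 1, w).view(1, w).expand(h, w)
    return torch.stack([xs, ys])  # shape (2, h, w)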
Example #2
import os
import ntpath

import numpy as np
import keras

# get_dataset_split, sort_imgs, read_image, normalise_img, read_gt, create_map,
# get_prob_map, getPixels, save_image, display_progress, getMetrics,
# printMetrics and get_custom_objects are helpers defined elsewhere.
def test(model_name, threshold=0.5, save=True, verbose=True, refine=False):
    classifications = np.array([0, 0, 0, 0])
    results_folder = os.path.join(model_name, 'results')
    os.makedirs(results_folder, exist_ok=True)
    _, test_set = get_dataset_split()
    if refine: test_set = sort_imgs(test_set)
    prediction = None
    model = keras.models.load_model(os.path.join(model_name,
                                                 model_name + '.h5'),
                                    custom_objects=get_custom_objects())
    for i in range(len(test_set)):
        if verbose: display_progress(i / len(test_set))
        img_path, gt_path = test_set[i].replace('\n', '').split(',')

        img = read_image(img_path, pad=(4, 4))
        img = normalise_img(img)
        ground_truth = read_gt(gt_path)
        ground_truth = np.squeeze(ground_truth)

        if refine:
            prediction = ground_truth if prediction is None else prediction
            pmap = create_map(prediction > 0.5, 1)
            prob = get_prob_map(pmap)

        prediction = model.predict(img)

        prediction = np.squeeze(prediction)
        prediction = prediction[4:-4, ...]

        prediction = (prediction > threshold).astype(np.uint8)
        if refine: prediction = prediction * prob

        classifications += getPixels(prediction, ground_truth, 0.5)

        if save:
            save_image(prediction,
                       os.path.join(results_folder, ntpath.basename(img_path)))
            save_image(
                ground_truth,
                os.path.join(
                    results_folder,
                    ntpath.basename(img_path).replace('.png', '_gt.png')))


            # Optionally save the probability map used for refinement:
            # if refine:
            #     prob = prob.astype(np.uint8)
            #     save_image(prob,
            #                os.path.join(results_folder,
            #                             ntpath.basename(img_path).replace(
            #                                 '.png', '_prob.png')))

    print(model_name, threshold)
    printMetrics(getMetrics(classifications))
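
A hypothetical invocation (the model name is an assumption; the function expects <model_name>/<model_name>.h5 on disk):

test('unet_baseline', threshold=0.5, save=True, refine=True)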
Example #3
import time

import numpy as np
from sklearn.metrics import accuracy_score, classification_report

# load_data, LocalClassifier, pick_aggregator, RelationalClassifier, ICA and
# create_map are defined elsewhere in the project.
def run_ICA(classifier_name, classifier_args, num_iter):

    features, labels, train_, val_, test_, graph, domain_labels = load_data()

    # run training
    t_begin = time.time()

    # random ordering
    np.random.shuffle(val_)

    y_true = [graph.Graph.nodes[node]['label'] for node in test_]

    local_clf = LocalClassifier(classifier_name, classifier_args)

    agg = pick_aggregator('count', domain_labels)

    relational_clf = RelationalClassifier(classifier_name, agg,
                                          classifier_args)

    ica = ICA(local_clf, relational_clf, True, max_iteration=num_iter)

    ica.fit(graph.Graph, train_)

    print('Model fitting done...')

    conditional_node_to_label_map = create_map(graph.Graph, train_)

    ica_predict = ica.predict(graph.Graph, val_, test_,
                              conditional_node_to_label_map)

    print('Model prediction done...')

    ica_accuracy = accuracy_score(y_true, ica_predict)
    t_end = time.time()
    print(classification_report(y_true, ica_predict))

    print(ica_accuracy)
    elapsed_time = t_end - t_begin
    print('Start time: \t\t' + time.strftime("%H:%M:%S", time.gmtime(t_begin)))
    print('Elapsed time: \t\t' +
          time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
    print('End time: \t\t' + time.strftime("%H:%M:%S", time.gmtime(t_end)))
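
Here create_map builds the conditional node-to-label map that seeds ICA's iterative inference. A plausible sketch, assuming it simply maps each training node to its known label:

def create_map(graph, train_nodes):
    # Known labels act as fixed evidence during relational inference (assumption).
    return {node: graph.nodes[node]['label'] for node in train_nodes}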
Example #4
import os
import datetime
import configparser

import numpy as np
import xarray as xr
import cartopy.crs as ccrs
from tqdm import tqdm

# check_bike_commuting, read_gpx, nan_filter_1d, nan_filter, create_map,
# create_video and write_info_file are helpers defined elsewhere in the project.
def velotafmap(input_dir, output_dir):
    """
    Map bike commuting performance over time.
    Input:
        -input_dir      str, directory containing GPX activity files
        -output_dir     str, directory for output images, video and info file
    """

    # read config
    config = configparser.ConfigParser()
    config_file = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
        "config",
        "config.ini",
    )
    config.read(config_file)

    # rasterization parameters
    PIX_SIZE = int(config["velotafmap"]["pix_size"])
    PIX_DECIMALS = int(config["velotafmap"]["pix_decimals"])

    # time bounds
    START_DATE = datetime.datetime.strptime(config["velotafmap"]["start_date"],
                                            "%Y%m%d")
    END_DATE = datetime.datetime.strptime(config["velotafmap"]["end_date"],
                                          "%Y%m%d")
    DAYS = (END_DATE - START_DATE).days

    # spatial bounds
    epsg_code = int(config["velotafmap"]["projection_epsg"])
    PROJECTION = ccrs.epsg(epsg_code)
    XMIN, XMAX, YMIN, YMAX = [
        int(value)
        for value in config["velotafmap"]["spatial_bounds"].split(",")
    ]

    # filters parameters
    SIGMA_TIME_FILTER = float(config["velotafmap"]["sigma_time_filter"])
    SIGMA_SPATIAL_FILTER = float(config["velotafmap"]["sigma_spatial_filter"])

    # video and maps parameters
    VIDEO_FPS = int(config["velotafmap"]["video_fps"])

    # create output dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if not os.path.exists(os.path.join(output_dir, "images")):
        os.makedirs(os.path.join(output_dir, "images"))

    # store processing start time
    processing_start_time = datetime.datetime.now()

    # initiate dataset
    x_coords = range(XMIN, XMAX, PIX_SIZE)
    y_coords = range(YMIN, YMAX, PIX_SIZE)
    t_coords = [
        START_DATE + datetime.timedelta(days=d) for d in range(DAYS + 1)
    ]
    dataset = xr.Dataset(
        data_vars={
            "velocity": xr.DataArray(
                data=np.nan,
                coords=[x_coords, y_coords, t_coords],
                dims=["x", "y", "time"],
            ),
            # "occurences" counts the points per cell during rasterization
            "occurences": xr.DataArray(
                data=0,
                coords=[x_coords, y_coords, t_coords],
                dims=["x", "y", "time"],
            ),
        },
        coords={
            "x": x_coords,
            "y": y_coords,
            "time": t_coords,
        },
    )

    # loop through available Strava activities to fill dataset
    gpx_files = [
        os.path.join(input_dir, f)
        for f in os.listdir(input_dir)
        if os.path.splitext(f)[1] == ".gpx"
    ]
    for input_file in tqdm(gpx_files):

        # skip activities that are not bike commuting
        if check_bike_commuting(input_file):

            # read gpx file
            (activity_date, _, _, _, _, _, points) = read_gpx(
                input_file, epsg_code)

            # store velocity data in dataset
            for index, point in points.iterrows():  # loop through points

                # find grid cell of current point
                x = int(np.around(point["x"], decimals=PIX_DECIMALS))  # x coord
                y = int(np.around(point["y"], decimals=PIX_DECIMALS))  # y coord
                t = datetime.datetime(year=index.year,
                                      month=index.month,
                                      day=index.day)  # time coord

                if np.isnan(dataset.velocity.loc[x, y, t]):

                    # this grid cell is empty: directly assign value
                    dataset.velocity.loc[x, y, t] = point["vel"]

                else:

                    # this grid cell already has values: fold the new point
                    # into the running average (incremental mean update)
                    dataset.velocity.loc[x, y, t] += (
                        point["vel"] - dataset.velocity.loc[x, y, t]
                    ) / (dataset.occurences.loc[x, y, t] + 1)

                # increase this cell's point count
                dataset.occurences.loc[x, y, t] += 1

    # apply 1D filter to velocity over time
    if SIGMA_TIME_FILTER != 0.0:
        dataset.velocity[:, :, :] = nan_filter_1d(
            np.asarray(dataset.velocity[:, :, :]),
            sigma=SIGMA_TIME_FILTER,
            axis=2,
        )

    # apply 2D filter to velocity for each date
    if SIGMA_SPATIAL_FILTER != 0.0:
        for date in t_coords:
            dataset.velocity.loc[:, :, date] = nan_filter(
                np.asarray(dataset.velocity.loc[:, :, date]),
                sigma=SIGMA_SPATIAL_FILTER)

    # create map of average velocity over whole timeframe
    create_map(
        dataset.velocity.mean(dim="time"),
        os.path.join(output_dir, "average.png"),
        PROJECTION,
    )

    # create animation of velocity over time
    for date in tqdm(t_coords):  # loop through dates

        # create image for this date
        create_map(
            dataset.velocity.loc[:, :, date],
            os.path.join(output_dir, "images",
                         "{}.png".format(date.strftime("%Y%m%d"))),
            PROJECTION,
        )

    # create video file from images of whole timeframe
    create_video(
        os.path.join(output_dir, "video.mp4"),
        os.path.join(output_dir, "images"),
        VIDEO_FPS,
    )

    # write info file
    info_file = os.path.join(output_dir, "info.txt")
    write_info_file(info_file, config, processing_start_time)
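
The per-cell update in the rasterization loop is the standard incremental mean, m_new = m + (x - m) / (n + 1), where n is the number of points already in the cell, so the full point history never has to be stored. A quick standalone check:

vals = [10.0, 14.0, 18.0]
mean, n = float("nan"), 0
for v in vals:
    mean = v if n == 0 else mean + (v - mean) / (n + 1)
    n += 1
assert mean == sum(vals) / len(vals)  # 14.0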
Example #5
import time

import numpy as np
from scipy.stats import sem
from sklearn.metrics import accuracy_score

# adj, idx_train, train, test, graph, domain_labels and args come from the
# surrounding script; LocalClassifier, pick_aggregator, RelationalClassifier,
# ICA and create_map are project modules.
eval_idx = np.setdiff1d(range(adj.shape[0]), idx_train)

# run training
ica_accuracies = list()
for run in range(args.num_trials):

    t_begin = time.time()

    # random ordering
    np.random.shuffle(eval_idx)

    y_true = [graph.node_list[t].label for t in test]
    local_clf = LocalClassifier(args.classifier)
    agg = pick_aggregator(args.aggregate, domain_labels)
    relational_clf = RelationalClassifier(args.classifier, agg)
    ica = ICA(local_clf,
              relational_clf,
              args.bootstrap,
              max_iteration=args.max_iteration)
    ica.fit(graph, train)
    conditional_node_to_label_map = create_map(graph, train)
    ica_predict = ica.predict(graph, eval_idx, test,
                              conditional_node_to_label_map)
    ica_accuracy = accuracy_score(y_true, ica_predict)
    ica_accuracies.append(ica_accuracy)
    print('Run ' + str(run) + ': \t\t' + str(ica_accuracy) +
          ', Elapsed time: \t\t' + str(time.time() - t_begin))

print("Final test results: {:.5f} +/- {:.5f} (sem)".format(
    np.mean(ica_accuracies), sem(ica_accuracies)))
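
scipy.stats.sem reports the standard error of the mean: the sample standard deviation (ddof=1) divided by the square root of the number of runs.

import numpy as np
from scipy.stats import sem

accs = [0.80, 0.82, 0.78]  # illustrative accuracies
assert np.isclose(sem(accs), np.std(accs, ddof=1) / np.sqrt(len(accs)))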
Example #6
from utils import (get_state_active_df, rev_df, create_map,
                   create_state_active, create_trends, create_daily_cnf,
                   create_daily_rec, create_table)
import pandas as pd
#state_wise,daily_ts,total = create_csv()

#create_csv()
state_wise = pd.read_csv('data/google/state_wise.csv')
daily_ts = pd.read_csv('data/google/daily_ts.csv')
total = pd.read_csv('data/google/total.csv')

df = get_state_active_df(state_wise)
new_t = total['delta']
#new_ev,yest = rev_df(daily_ts)
#new_t = int(total.loc[0,'confirmed']) - int(yest)
ind_map = create_map(df)
state_active = create_state_active(df)
trend = create_trends(daily_ts)
daily_cnf = create_daily_cnf(daily_ts)
daily_rec = create_daily_rec(daily_ts)
tab1 = state_wise
tab1 = tab1[tab1['Confirmed'] != 0]  # drop states with no confirmed cases
tab1 = tab1.sort_values('Confirmed', ascending=False)
tab1 = tab1.reset_index(drop=True)
tab = create_table(tab1.drop('Unnamed: 0', axis=1))

from flask import Flask, render_template, Markup

app = Flask(__name__)
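
The routes themselves are not part of this excerpt; a minimal sketch of how the precomputed fragments might be served (the route, template name and template variables are all assumptions):

@app.route('/')
def index():
    # ind_map, trend and tab are assumed to be HTML fragments
    return render_template('index.html',
                           ind_map=Markup(ind_map),
                           trend=Markup(trend),
                           table=Markup(tab))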
Example #7
import random

# create_map, update_revealed_cell_indices, find_sweepable_or_flaggable_cells,
# collect_data and get_most_trustworthy_cell_index are project helpers.
def play_minesweeper_game_as_ai(**kwargs):
    """
    Play a Minesweeper game as an AI.
    """
    # Determine whether this run collects training data
    training = kwargs.get('training') is True

    # Create the smallest map
    map_info = create_map(1)
    row_size = map_info['row_size']
    column_size = map_info['column_size']
    matrix = map_info['matrix'].split(',')

    # Initialize variables
    unrevealed_cell_indices = set(range(0, row_size * column_size))
    revealed_cell_indices = set()
    empty_cell_indices = set()
    picked_cell_index = -1

    # Get empty cell indices
    for matrix_index, matrix_value in enumerate(matrix):
        if matrix_value.startswith('0-'):
            empty_cell_indices.add(matrix_index)

    # Make the first sweep, preferring an empty cell when one exists
    if len(empty_cell_indices) > 0:
        picked_cell_index = random.choice(list(empty_cell_indices))
    else:
        picked_cell_index = random.choice(list(unrevealed_cell_indices))
    revealed_cell_indices = update_revealed_cell_indices(
        matrix, revealed_cell_indices, picked_cell_index)
    last_clicked_cell_row, last_clicked_cell_column = divmod(
        picked_cell_index, column_size)

    # Play until every cell has been revealed
    while True:

        if len(revealed_cell_indices) == row_size * column_size:
            break

        # Find sweepable or flaggable cells
        indices = find_sweepable_or_flaggable_cells(matrix, row_size,
                                                    column_size,
                                                    revealed_cell_indices,
                                                    last_clicked_cell_row,
                                                    last_clicked_cell_column)

        # Fall back when no cell satisfies the condition
        if not indices:

            # Get data for training sets
            if training:
                collect_data(matrix, revealed_cell_indices, row_size,
                             column_size)
                unrevealed_cell_indices = set(range(
                    0, row_size * column_size)) - revealed_cell_indices
                picked_cell_index = random.choice(
                    list(unrevealed_cell_indices))

            # Test sets
            else:
                picked_cell_index = get_most_trustworthy_cell_index(
                    matrix, revealed_cell_indices, row_size, column_size)

            # Update revealed cell indices and last clicked cell
            revealed_cell_indices = update_revealed_cell_indices(
                matrix, revealed_cell_indices, picked_cell_index)
            last_clicked_cell_row, last_clicked_cell_column = divmod(
                picked_cell_index, column_size)

        # Sweep or flag cells with given condition
        else:
            for picked_cell_index in indices:
                revealed_cell_indices = update_revealed_cell_indices(
                    matrix, revealed_cell_indices, picked_cell_index)
                last_clicked_cell_row, last_clicked_cell_column = divmod(
                    picked_cell_index, column_size)

    return None
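
Throughout the loop, divmod converts a flat cell index into (row, column) grid coordinates:

# e.g. on a board with 4 columns, flat index 9 is row 2, column 1
assert divmod(9, 4) == (2, 1)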