예제 #1
0
def run_dae_cpd(df):
    """Split the dataframe's timeline into segments at precomputed change points.

    Loads the dataset, takes a cached list of change-point indexes (the
    DAE-CPD model run itself is disabled because it takes ~20 minutes),
    and turns consecutive index pairs into (start, end) timestamp tuples.

    Returns:
        list of (start_time, end_time) tuples covering times[0] up to the
        last change point.
    """
    data = data_processing.load_dataset(df)
    values = data.values
    X, y = values[:, :-1], values[:, -1]  # X: samples, y: labels

    # Real model run disabled to avoid a ~20 minute delay; the list below
    # is a cached result of a previous fit_predict() call.
    # model = dae_cpd.dae(X, y).fit()
    # result = model.fit_predict()  # change point indexes
    result = [
        57, 211, 306, 359, 545, 691, 1176, 1319, 1412, 1820, 2277, 2470, 2559,
        2696, 2872, 3179, 3270, 3357, 3564, 3645, 3772, 3976, 4148, 4287, 4349,
        4408, 4472, 4557, 4758, 4856, 4933
    ]
    print(result)

    times = sorted(df['time'].unique())
    print(len(times))

    # Pair each boundary index with its successor:
    # (0, cp1), (cp1, cp2), ... -> mapped onto timestamps.
    boundaries = [0] + result
    segments = [(times[a], times[b]) for a, b in zip(boundaries, boundaries[1:])]

    print(segments[:3])
    return segments
예제 #2
0
def test():
    """Evaluate the hand-picked best checkpoint on the held-out test split."""
    # Chosen by Jordan as the best model
    model_path = 'models/nc=62:F=3:M=5:lr=0.01:epoch=010.pt'
    network = load_model(model_path)
    dataset = load_dataset()
    _train_split, _val_split, test_split = split_dataset(dataset)
    test_acc = get_accuracy(network, test_split)
    print(f'Test accuracy = {test_acc}')
예제 #3
0
def run(dataset_path):
    """Train an Inception_V1 binary classifier on the dataset at *dataset_path*
    and print its accuracy on the validation split.

    Args:
        dataset_path: path handed to ``load_dataset``; yields train and
            validation images/labels.

    Side effects: trains the model in place and prints progress plus the
    final validation accuracy. Returns None.
    """
    model = Inception_V1(input_shape=(256, 256, 1))
    # Alternative backbones tried during experimentation:
    # model = LeNet(input_shape=(256, 256, 1))
    # model = ResNet50(input_shape=(256, 256, 1))
    # model = Inception_small_V1(input_shape=(256, 256, 1), classes=2)

    # NOTE(review): three losses are declared (one per Inception output,
    # presumably the two auxiliary heads plus the main head), but fit()
    # below passes a single Y_train target — confirm the model exposes a
    # single output here, or pass [Y_train] * 3 as in the earlier variant.
    model.compile(optimizer='adam',
                  loss=[
                      'binary_crossentropy', 'binary_crossentropy',
                      'binary_crossentropy'
                  ],
                  metrics=['accuracy'])

    print("Loading dataset")
    X_train, Y_train, X_valid, Y_valid = load_dataset(dataset_path)

    Y_train = convert_to_one_hot(Y_train)
    Y_valid = convert_to_one_hot(Y_valid)

    # Equal class weights — kept explicit so imbalance handling is easy to tune.
    class_weight = {0: 1., 1: 1.}

    model.fit(X_train,
              Y_train,
              epochs=1,
              batch_size=32,
              class_weight=class_weight,
              validation_split=0.2)

    results = model.predict(X_valid)

    # Threshold predicted probabilities at 0.5.
    # NOTE(review): assumes each results[i] is a scalar probability — with a
    # multi-output model predict() returns a list of arrays; verify shapes.
    output = [0 if prob < 0.5 else 1 for prob in results]

    # Count validation samples whose thresholded prediction matches the label.
    # NOTE(review): Y_valid was one-hot encoded above, so each label is an
    # array; `pred == label.astype(int)` may be element-wise — confirm that
    # convert_to_one_hot actually yields comparable scalars here.
    num_equal = sum(1 for pred, label in zip(output, Y_valid)
                    if pred == label.astype(int))

    print(num_equal / len(output))
예제 #4
0
# Fix: `dash`, `os`, `json` (and `html`, used just below for app.layout) were
# referenced but never imported, so this module raised NameError on import.
import json
import os

import dash
import pandas as pd
from dash import html  # NOTE(review): on dash < 2.0 use `import dash_html_components as html`

import data_processing as dp
import plot_computations as pc
import plotting
import markdown

DEBUG = True

external_stylesheets = ['https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# Exposed for WSGI servers (e.g. gunicorn) to serve the app.
server = app.server

# Load the dataset and the US state boundaries used by the choropleth.
df = dp.load_dataset()
primary_energy_df = dp.load_primary_energy_sources(df)
with open(os.path.join("data", "united_states.geojson")) as infile:
    united_states_geojson = json.load(infile)

# Precomputed figures
# us_main_plot_dict = pc.precompute_main_plots(df, primary_energy_df)
# state_total_dict = pc.precompute_state_per_year(df)
app.layout = html.Div(children = [
    # html.Div(
    #     children=[
    #         html.Div(
    #             dcc.Graph(
    #                 id="choropleth",
    #                 figure=pc.update_choropleth(df, united_states_geojson)
예제 #5
0
def main():
    """Train a CharacterClassifier for 128 epochs and plot its learning curves."""
    classifier = CharacterClassifier()
    dataset = load_dataset()
    train_loader, val_loader, _test_loader = split_dataset(dataset, batch_size=64)
    checkpoint_path = train_network(classifier, train_loader, val_loader,
                                    num_epochs=128)
    plot_curves(checkpoint_path)
예제 #6
0
def verify_on_small_dataset():
    """Sanity-check the training loop by fitting a 4-class model on a tiny loader.

    Trains without a validation set and plots training curves only.
    """
    classifier = CharacterClassifier(num_classes=4)
    small_loader = get_small_dataloader(load_dataset(), num_classes=4)
    checkpoint_path = train_network(classifier, small_loader)
    plot_curves(checkpoint_path, val=False)