def main(args):
    np.random.seed(args.seed)
    gens = data.instantiate_generators()

    X, t_phn, t_spk = data.generate(gens, 100)
    X_val, t_phn_val, t_spk_val = data.generate(gens, 100)

    plotter = plotting.Plotter(args.no_plot)
    plotter.plot(X, t_phn, t_spk, name="Raw data")
    raw_bl, raw_ur = plotter.plot(X_val,
                                  t_phn_val,
                                  t_spk_val,
                                  name="Raw validation data")

    torch.manual_seed(args.seed)
    bne, phn_dec, spk_dec = model.create_models(args.bne_width)

    print("\nTraining PHN network")
    training.train(bne, [phn_dec],
                   itertools.chain(bne.parameters(), phn_dec.parameters()),
                   (X, [t_phn]), (X_val, [t_phn_val]), args.nb_epochs)

    bl, ur = plotter.plot(X,
                          t_phn,
                          t_spk,
                          name="BN features, PHN optimized",
                          transform=bne)
    plotting.plot_preds(plotter, "PHN decoding in raw space", raw_bl, raw_ur,
                        lambda x: phn_dec(bne(x)))
    plotting.plot_preds(plotter, "PHN decoding in BN space", bl, ur, phn_dec)

    print("\nTraining SPK decoder")
    training.train(bne, [spk_dec], spk_dec.parameters(), (X, [t_spk]),
                   (X_val, [t_spk_val]), args.nb_epochs)
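# training.train() above is project code that is not shown on this page. A
# minimal sketch of what such a routine might do, given how it is called;
# the loss choice (cross-entropy), Adam, the learning rate, and the
# full-batch updates are all assumptions, not the project's implementation.
import torch
import torch.nn.functional as F

def train_sketch(extractor, decoders, params, train_data, val_data, nb_epochs, lr=1e-3):
    X, targets = train_data
    X_val, val_targets = val_data
    opt = torch.optim.Adam(params, lr=lr)
    for epoch in range(nb_epochs):
        opt.zero_grad()
        feats = extractor(X)
        # one decoder per target stream; the per-stream losses are summed
        loss = sum(F.cross_entropy(dec(feats), t) for dec, t in zip(decoders, targets))
        loss.backward()
        opt.step()
        with torch.no_grad():
            val_feats = extractor(X_val)
            val_loss = sum(F.cross_entropy(dec(val_feats), t)
                           for dec, t in zip(decoders, val_targets))
        print(f"epoch {epoch}: train loss {loss.item():.4f}, val loss {val_loss.item():.4f}")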
Example #2
def main(args):
    np.random.seed(args.seed)
    gens = data.instantiate_generators()

    X, t_phn, t_spk = data.generate(gens, 100)
    X_val, t_phn_val, t_spk_val = data.generate(gens, 100)

    plotter = plotting.Plotter(args.no_plot)
    plotter.plot(X, t_phn, t_spk, name="Raw data")
    raw_bl, raw_ur = plotter.plot(X_val,
                                  t_phn_val,
                                  t_spk_val,
                                  name="Raw validation data")

    torch.manual_seed(args.seed)
    bn_extractor_init, phn_decoder_init, spk_decoder_init = model.create_models(
        args.bne_width)

    bn_extractor = copy.deepcopy(bn_extractor_init)
    spk_decoder = copy.deepcopy(spk_decoder_init)
    phn_decoder = copy.deepcopy(phn_decoder_init)

    print("\nTraining in disconcert, from same init:")
    adversary_train(bn_extractor, phn_decoder, spk_decoder,
                    (X, [t_phn, t_spk]), (X_val, [t_phn_val, t_spk_val]),
                    args.nb_epochs)

    bl, ur = plotter.plot(X,
                          t_phn,
                          t_spk,
                          name="BN features, PHN-SPK optimized",
                          transform=bn_extractor)
    plotting.plot_preds(plotter,
                        "PHN decoding in disconcertly trained BN space", bl,
                        ur, phn_decoder)
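# adversary_train() is also project code not shown here. A minimal sketch of
# one common recipe it could follow: the speaker decoder learns to read
# speaker identity out of the bottleneck features, while the extractor and
# phoneme decoder are updated to keep phonemes predictable and speakers not.
# The optimizers, losses, and the lam trade-off weight are assumptions.
import itertools
import torch
import torch.nn.functional as F

def adversary_train_sketch(bne, phn_dec, spk_dec, train_data, val_data,
                           nb_epochs, lr=1e-3, lam=1.0):
    X, (t_phn, t_spk) = train_data  # validation monitoring omitted here
    main_opt = torch.optim.Adam(
        itertools.chain(bne.parameters(), phn_dec.parameters()), lr=lr)
    spk_opt = torch.optim.Adam(spk_dec.parameters(), lr=lr)
    for _ in range(nb_epochs):
        # adversary step: fit the speaker decoder on detached features
        spk_opt.zero_grad()
        spk_loss = F.cross_entropy(spk_dec(bne(X).detach()), t_spk)
        spk_loss.backward()
        spk_opt.step()
        # main step: help the phoneme decoder, confuse the speaker decoder
        main_opt.zero_grad()
        feats = bne(X)
        loss = (F.cross_entropy(phn_dec(feats), t_phn)
                - lam * F.cross_entropy(spk_dec(feats), t_spk))
        loss.backward()
        main_opt.step()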
Example #3
BATCH_SIZE = 128
LEARNING_RATE = 0.0001
MOMENTUM = 0.5
LAMBDA1 = 1
LAMBDA2 = 10
INPUT_SHAPE_GEN = (32, 32, 1)
INPUT_SHAPE_DIS = (32, 32, 3)
WEIGHTS_GEN = 'weights_cifar10_yuv_gen.hdf5'
WEIGHTS_DIS = 'weights_cifar10_yuv_dis.hdf5'
WEIGHTS_GAN = 'weights_cifar10_yuv_gan.hdf5'
MODE = 1  # 1: train - 2: visualize

model_gen, model_dis, model_gan = create_models(
    input_shape_gen=INPUT_SHAPE_GEN,
    input_shape_dis=INPUT_SHAPE_DIS,
    output_channels=2,
    lr=LEARNING_RATE,
    momentum=MOMENTUM,
    loss_weights=[LAMBDA1, LAMBDA2])

if os.path.exists(WEIGHTS_GEN):
    model_gen.load_weights(WEIGHTS_GEN)

if os.path.exists(WEIGHTS_DIS):
    model_dis.load_weights(WEIGHTS_DIS)

if os.path.exists(WEIGHTS_GAN):
    model_gan.load_weights(WEIGHTS_GAN)

model_gen.summary()
model_dis.summary()
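# With MODE == 1 the script would go on to train; that loop is not shown on
# this page. A minimal sketch of one training step consistent with the shapes
# above (generator: Y channel in, U and V channels out; discriminator: full
# YUV in). The batch variables and the target layout of model_gan (adversarial
# term weighted by LAMBDA1, chroma reconstruction by LAMBDA2) are assumptions.
import numpy as np

def train_step(y_batch, uv_batch):
    batch = len(y_batch)
    fake_uv = model_gen.predict(y_batch)
    real_yuv = np.concatenate([y_batch, uv_batch], axis=-1)
    fake_yuv = np.concatenate([y_batch, fake_uv], axis=-1)
    # discriminator: separate real colorizations from generated ones
    d_loss_real = model_dis.train_on_batch(real_yuv, np.ones((batch, 1)))
    d_loss_fake = model_dis.train_on_batch(fake_yuv, np.zeros((batch, 1)))
    # stacked model: fool the discriminator and match the true chroma
    g_loss = model_gan.train_on_batch(y_batch, [np.ones((batch, 1)), uv_batch])
    return d_loss_real, d_loss_fake, g_loss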
Example #4
assert max_label == 9, "max label was not the expected 9"

train_buckets = dataset_to_buckets((x_train, y_train))
test_buckets = dataset_to_buckets((x_test, y_test))
train_triplets = pick_triplets(train_buckets, TRAIN_TRIPLET_AMT)
test_triplets = pick_triplets(test_buckets, TEST_TRIPLET_AMT)

first_triplet = get_triplet(0, train_triplets)
display_images(first_triplet)

input_shape = get_triplet(0, train_triplets)[0].shape
assert len(input_shape) == 1, f"Shape {input_shape} should have only had one dimension - it should have already been flattened"
input_length: ExampleLength = input_shape[0]

# create the models - one for training, one for prediction; uses shared layers
training_model, prediction_model = md.create_models(input_length=input_length, output_length=OUTPUT_LENGTH)

encoded_triplet = md.encode_examples(first_triplet, prediction_model)
anchor, positive, negative = encoded_triplet
print(f"Positive loss before training: {np_triplet_loss(anchor, positive)}")
print(f"Negative loss before training: {np_triplet_loss(anchor, negative)}")

display_images(encoded_triplet)

# create the loss function
triplet_loss = md.get_triplet_loss(TRAIN_ALPHA)
print("Compiling....")

# compile the model for training
training_model.compile(loss=triplet_loss, optimizer=Adam(lr=LEARNING_RATE),
                       metrics=[md.get_triplet_accuracy(TEST_ALPHA)])
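# np_triplet_loss() is not shown on this page. Judging by the calls above it
# takes a pair of embeddings and should come out smaller for (anchor, positive)
# than for (anchor, negative) once training works; a plausible NumPy version is
# the squared Euclidean distance. This is an assumption about the helper.
import numpy as np

def np_triplet_loss(anchor, other):
    # squared Euclidean distance between two embedding vectors
    return float(np.sum(np.square(anchor - other)))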
Example #5
def run():

    print("프로그램을 시작합니다..")
    print("그래프를 표현할 3개의 데이터를 입력받습니다.")

    tableNumber = 0
    unitinfo = []
    timeinfo = []

    while tableNumber < 3:
        print("테이블{0}의 데이터 정보를 입력받습니다.".format(tableNumber + 1))
        aList = input_information()
        print("테이블{0}은 {1}{2}마다 데이터를 생성합니다.".format(tableNumber + 1, aList[1],
                                                    aList[0]))
        timeinfo.append(aList[1])
        unitinfo.append(aList[0])

        tableNumber += 1

    timeinfo = time_translate(unitinfo, timeinfo)

    new_table_1 = create_models("table1")
    new_table_2 = create_models("table2")
    new_table_3 = create_models("table3")

    new_avgtable = create_avg_table("avgtable")

    new_symbol_table = create_symbol_table("symtable")

    init_db()

    model.add_unit_entry(new_avgtable)

    p1 = threading.Thread(target=model.data_generator1,
                          args=(
                              new_table_1,
                              timeinfo[0],
                          ))
    p2 = threading.Thread(target=model.data_generator2,
                          args=(
                              new_table_2,
                              timeinfo[1],
                          ))
    p3 = threading.Thread(target=model.data_generator3,
                          args=(
                              new_table_3,
                              timeinfo[2],
                          ))
    p4 = threading.Thread(target=model.data_update,
                          args=(
                              new_table_1,
                              new_table_2,
                              new_table_3,
                              new_avgtable,
                              new_symbol_table,
                          ))

    p1.start()
    p2.start()
    p3.start()
    p4.start()
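# input_information() and time_translate() are defined elsewhere in this
# script. A plausible sketch of time_translate(), normalizing every
# (unit, value) pair to seconds; the unit spellings are assumptions.
def time_translate_sketch(unitinfo, timeinfo):
    seconds_per_unit = {"sec": 1, "min": 60, "hour": 3600}
    return [int(t) * seconds_per_unit.get(u, 1)
            for u, t in zip(unitinfo, timeinfo)]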
Example #6
def main(targets):
    if 'data' in targets:
        with open('config/data-params.json') as fh:
            data_cfg = json.load(fh)

        data = get_features_labels(**data_cfg)

    if 'compare' in targets:
        with open('config/data-params.json') as fh:
            data_cfg = json.load(fh)
        with open('config/compare.json') as f:
            compare_cfg = json.load(f)
            jet_feat = compare_cfg['jet_features']
            track_feat = compare_cfg['track_features']
            sv_feat = compare_cfg['sv_features']

            entrystop = compare_cfg['entrystop']
            namedecode = compare_cfg['namedecode']

        compare(**data_cfg, **compare_cfg)

    if 'conv1d' in targets:
        with open('config/data-params.yml') as file:
            # FullLoader converts YAML scalar values to Python objects
            definitions = yaml.load(file, Loader=yaml.FullLoader)

        with open('config/model-params.json') as fh:
            data_cfg = json.load(fh)

        create_models(**definitions, **data_cfg)

    if 'test' in targets:
        with open('config/data-params.yml') as file:
            # FullLoader converts YAML scalar values to Python objects
            definitions = yaml.load(file, Loader=yaml.FullLoader)

        with open('config/test-model-params.json') as fh:
            data_cfg = json.load(fh)
        create_models(**definitions, **data_cfg)

    if 'all' in targets:
        with open('config/data-params.json') as fh:
            data_cfg = json.load(fh)
        with open('config/compare.json') as f:
            compare_cfg = json.load(f)
            jet_feat = compare_cfg['jet_features']
            track_feat = compare_cfg['track_features']
            sv_feat = compare_cfg['sv_features']

            entrystop = compare_cfg['entrystop']
            namedecode = compare_cfg['namedecode']

        compare(**data_cfg, **compare_cfg)

        with open('config/data-params.yml') as file:
            # FullLoader converts YAML scalar values to Python objects
            definitions = yaml.load(file, Loader=yaml.FullLoader)

        with open('config/model-params.json') as fh:
            data_cfg = json.load(fh)

        create_models(**definitions, **data_cfg)

    return
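# A typical entry point for a targets-style run script; reading the targets
# from sys.argv is an assumption about how this file is launched, e.g.
#     python run.py data compare
import sys

if __name__ == '__main__':
    main(sys.argv[1:])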