Example #1
def target(ostream, params, parallel_tests, sorted_mode):
    global PROCESSED, REPORT_LOCK, TOTAL
    for result in process(params, pool_size=parallel_tests, sorted_mode=sorted_mode):
        with REPORT_LOCK:
            PROCESSED += 1
            print_progress_info(PROCESSED, TOTAL)
            save_result(ostream, strip_ephemeral(result))
Example #2
def target(ostream, params, parallel_tests):
    global PROCESSED, REPORT_LOCK, TOTAL
    for result in process(params, pool_size=parallel_tests):
        with REPORT_LOCK:
            PROCESSED += 1
            print_progress_info(PROCESSED, TOTAL)
            save_result(ostream, strip_ephemeral(result))
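Examples #1 and #2 are thread worker bodies that rely on module-level state (PROCESSED, TOTAL, REPORT_LOCK). A minimal driver sketch under that assumption (run_workers and param_sets are hypothetical; target and required_actions_count come from the surrounding examples):

import threading

# Shared state assumed by the worker above; it must exist before any thread starts.
PROCESSED = 0
TOTAL = 0
REPORT_LOCK = threading.Lock()

def run_workers(ostream, param_sets, parallel_tests):
    # Launch one worker thread per parameter set and wait for all of them to finish.
    global TOTAL
    TOTAL = sum(required_actions_count(p) for p in param_sets)  # helper seen in Example #3
    threads = [threading.Thread(target=target, args=(ostream, p, parallel_tests))
               for p in param_sets]
    for t in threads:
        t.start()
    for t in threads:
        t.join()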
Example #3
def main(conf, istream, ostream, test_whitelist, test_blacklist, stage_whitelist,
         stage_blacklist, tags_whitelist, tags_blacklist, no_action):
    '''
    Main worker function.
    Runs the individual stages and generates the per-stage/per-test result list.
    '''
    params = dict(test_whitelist=test_whitelist, test_blacklist=test_blacklist,
                  stage_whitelist=stage_whitelist, stage_blacklist=stage_blacklist,
                  tags_whitelist=tags_whitelist, tags_blacklist=tags_blacklist,
                  enabled=not no_action)
    params = load(istream, config_file=conf, augment=params)
    total = required_actions_count(params)
    processed = 0
    print_progress_info(processed, total)
    for item in params:
        for result in process(item):
            processed += 1
            save_result(ostream, strip_ephemeral(result))
            print_progress_info(processed, total)
Example #4
def main(conf,
         istream,
         ostream,
         test_whitelist,
         test_blacklist,
         stage_whitelist,
         stage_blacklist,
         tags_whitelist,
         tags_blacklist,
         no_action,
         parallel_instances=1):
    '''
    Main worker function.
    Runs the individual stages and generates the per-stage/per-test result list.
    '''
    params = dict(test_whitelist=test_whitelist,
                  test_blacklist=test_blacklist,
                  stage_whitelist=stage_whitelist,
                  stage_blacklist=stage_blacklist,
                  tags_whitelist=tags_whitelist,
                  tags_blacklist=tags_blacklist,
                  enabled=not no_action)
    params = load(istream, config_file=conf, augment=params)
    progress_info = ProgressInfo(params)

    tests = []
    for item in params:
        for test_stage, stage_tests in item['test_stages'].items():
            for test in stage_tests:
                tests.append(
                    dict(test=dict(name=test, stage=test_stage), **item))

    from gevent.pool import Pool
    pool = Pool(size=parallel_instances)
    for result in pool.imap_unordered(
            lambda test: process(progress_info, test), tests):
        save_result(ostream, strip_ephemeral(result))
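A note on Example #4: gevent greenlets are cooperative, so the pool only overlaps work while process() yields to the event loop. When process() does blocking standard-library I/O, the usual prerequisite, not shown in the snippet, is to monkey-patch early:

# Typically executed once, at the very top of the program's entry module.
from gevent import monkey
monkey.patch_all()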
Example #5
# Likely imports for this snippet; Market and data are project-local modules not shown here.
import datetime

import pandas as pd
import zipline
from redis import Redis
from rq import get_current_job
from zipline.utils.factory import create_simulation_parameters


def run_analyse(script, codes, start, end):
    open_time = "09:30:00"
    close_time = "15:00:00"
    start_time = "{0} {1}".format(start, open_time)
    end_time = "{0} {1}".format(end, close_time)

    sim_params = create_simulation_parameters(
        start=pd.to_datetime(start_time).tz_localize("Asia/Shanghai").tz_convert("UTC"),
        end=pd.to_datetime(end_time).tz_localize("Asia/Shanghai").tz_convert("UTC"),
        data_frequency="daily",
        emission_rate="daily",
        sids=codes)

    with open(script, 'r') as f:
        algo_text = f.read()
    zp_algo = zipline.TradingAlgorithm(script=algo_text,
                                       namespace={},
                                       capital_base=10e6,
                                       sim_params=sim_params)

    stocks = Market.get_stocks(codes, start, end)
    d = pd.Panel(stocks)

    res = zp_algo.run(d)
    results = {}
    results['parameters'] = {
        'time': datetime.datetime.now(),
        'algorithm': script,
    }
    results['results'] = res
    results['report'] = zp_algo.risk_report
    results['orders'] = zp_algo.blotter.orders
    results['benchmark'] = zp_algo.perf_tracker.all_benchmark_returns
    job = get_current_job(connection=Redis())
    data.save_result(job.id, results)
    return results
Example #6
# (definition of the callbacks list used in fit_generator below is elided in this snippet)
# Training
history_dilated = model_dilated.fit_generator(
    generator=trainGen,
    steps_per_epoch=NUM_TRAINING_STEP,
    validation_data=valGen,
    validation_steps=NUM_VALIDATION_STEP,
    epochs=NUM_EPOCH,
    callbacks=callbacks)

print("Predict and save results...")
print("...For U-Net with 32 filters...")
testGene = testGenerator(test_path)
result_1 = model_32.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...For U-Net with 64 filters...")
testGene = testGenerator(test_path)
result_2 = model_64.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...For U-Net with dilated convolution...")
testGene = testGenerator(test_path)
result_3 = model_dilated.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...Averaging the prediction results...")
result = (result_1 + result_2 + result_3) / 3
save_result(predict_path, result)

print("Make submission...")
make_submission(predict_path,
                test_size=TEST_SIZE,
                submission_filename=os.path.join(submission_path,
                                                 "submission.csv"))

print("Done!")
Example #7
# If the --verbose argument is not supplied, suppress all of the TensorFlow startup messages.
if not args.verbose:
    import os
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    import tensorflow as tf
    tf.logging.set_verbosity(tf.logging.ERROR)

import models
import data
from datetime import datetime
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam

# Compile the model
model = models.unet2D(size=args.size, ablated=args.ablated)
model.compile(optimizer=Adam(lr=0.0001),
              loss="binary_crossentropy",
              metrics=["accuracy"])
model.load_weights(args.weights)

# Process the images in the test set and save the results to the test/ directory.
test_gen = data.test_generator(f"{args.dir}",
                               num_image=args.tests,
                               target_size=(args.size, args.size))
results = model.predict_generator(test_gen, args.tests, verbose=1)
# Use the custom save_result() method defined in data.py to save the results as
# greyscale .png images.
print("Saving results in ./test/")
data.save_result("test", results)
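save_result() here is project code from data.py; the comment above only tells us it writes each prediction as a greyscale .png. A minimal sketch of such a helper, assuming (H, W, 1) float predictions in [0, 1] (hypothetical; the real data.py may differ):

import os
import numpy as np
import skimage.io as io

def save_result(save_path, results):
    # Write each predicted mask as an 8-bit greyscale PNG named by its index.
    for i, item in enumerate(results):
        img = (np.squeeze(item) * 255).astype(np.uint8)
        io.imsave(os.path.join(save_path, "%d_predict.png" % i), img)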
Example #8
from pathlib import Path

# Data directory; the branch selecting an alternative path is elided in this snippet.
pth = Path('data')
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')

myGene = train_generator(batch_size=2,
                         train_path=pth / 'membrane/train',
                         image_folder='image',
                         mask_folder='label',
                         aug_dict=data_gen_args,
                         save_to_dir=None)

model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5',
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)
model.fit_generator(myGene,
                    steps_per_epoch=300,
                    epochs=1,
                    callbacks=[model_checkpoint])

testGene = test_generator(test_path=pth / 'membrane/test')
results = model.predict_generator(testGene, 30, verbose=1)
save_result(pth / 'membrane/test', results)
Example #9
    else:
        print('File {} does not exist'.format(filename))
print(len(test_imgs))

predict_path = "predict_images"
submission_path = "submission"
weight_path = "weights"
weight_list = ["weights_32.h5", "weights_64.h5", "weights_dilated.h5"]
# weight_list = ["weights_32_dice.h5"]

print("Check weights...")
missing_weight = list(set(weight_list) - set(os.listdir(weight_path)))
if missing_weight:
    raise FileNotFoundError("Cannot find: " + str(missing_weight))

print("Load models and predict...")
results = 0
for w in weight_list:
    print("...Load " + w + "...")
    model = load_model(os.path.join(weight_path, w), custom_objects={"dice_loss": dice_loss, "f1": f1})
    print("...Predict...")
    testGene = testGenerator(test_imgs)
    results += model.predict_generator(testGene, TEST_SIZE, verbose=1)
results /= len(weight_list)
save_result(predict_path, results, test_index)

print("Make submission...")
make_submission(predict_path, test_size=TEST_SIZE, indices=test_index, submission_filename=os.path.join(submission_path, "submission.csv"))

print("Done!")
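load_model() must be given every custom object the weights were compiled with; the project's dice_loss and f1 are not shown in this listing. For reference, the common soft-Dice loss has this shape (a sketch of the standard formulation, not necessarily this project's exact code):

from keras import backend as K

def dice_loss(y_true, y_pred, smooth=1.0):
    # Soft Dice: 1 - 2*|A.B| / (|A| + |B|); smooth avoids division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)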
Example #10
print(f"   Augmentations: \n{indent(pformat(data_gen_args), ' '*18)}")
train, validate = train_generator(batch_size,
                                  'data/filament/train',
                                  'image',
                                  'label',
                                  data_gen_args,
                                  save_to_dir=None)

model = unet(learning_rate=learning_rate)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    str(output_dir / 'unet_filament.hdf5'),
    monitor='val_loss',
    verbose=1,
    save_best_only=True)
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                      verbose=1,
                                      patience=10)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                             histogram_freq=1,
                                             write_images=True)
model.fit(train,
          steps_per_epoch=steps_per_epoch,
          epochs=epochs,
          callbacks=[model_checkpoint, es, tensorboard],
          validation_data=validate,
          validation_steps=1)

testGene = test_generator("data/filament/test")
results = model.predict(testGene, verbose=1)
save_result(output_dir, results)
Example #11
predict_path = "predict_images"
submission_path = "submission"
weight_path = "weights"
weight_list = ["weights_segnet.h5", "weights_unet.h5", "weights_dunet.h5",
               "weights_unet_attention.h5", "weights_linknet.h5",
               "weights_dlinknet.h5", "weights_linknet_attention.h5"]


method = int(input("Choose method: 1. validate single model  2. calculate the voting: "))
vote_result = 0
if method == 1:
    num = int(input("Choose the model to validate: 1. segnet  2. unet  3. dunet  "
                    "4. unet_attention  5. linknet  6. dlinknet  7. linknet_attention: "))
    w = weight_list[num - 1]
    model = load_model(os.path.join(weight_path, w), custom_objects={"dice_loss": dice_loss, "f1": f1})
    test = testGenerator(test_path)
    predict_result = model.predict_generator(test, TEST_SIZE, verbose=1)
    print(type(predict_result))
    save_result(predict_path, predict_result)
    
elif method == 2:
    print("calculate the voting of different models")
    for w in weight_list:
        model = load_model(os.path.join(weight_path, w), custom_objects={"dice_loss": dice_loss, "f1": f1})
        test = testGenerator(test_path)
        vote_result += model.predict_generator(test, TEST_SIZE, verbose=1)
    # Average the accumulated predictions (soft voting) and save once after all models have run.
    vote_result /= len(weight_list)
    save_result(predict_path, vote_result)

print("make submission")
make_submission(predict_path, test_size=TEST_SIZE, submission_filename=os.path.join(submission_path, "submission.csv"))