Example #1
def run_scenario(data, options, output_dir):
    """
    Run model using specified parameters

    Parameters
    ----------
    data : dict
        Case data

    options : dict
        Model options and parameters to be applied for given scenario

    output_dir : str
        Root output directory
    """

    # Check if results already exist - skip if True
    if check_if_solved(options, output_dir):
        print('Case already solved - skipping:', options)
        return None

    # Construct model and solve
    print('Running:', options)
    m = construct_model(data, use_pu=True)
    m = configure_model(m, options)
    m = solve_model(m)

    # Extract and save results
    solution = get_solution(m, options)
    save_solution(solution, output_dir)
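A minimal usage sketch, not part of the original example: driving run_scenario over several option dictionaries. The data object and the baseline values are assumptions; the dict layout mirrors the params dict in Example #2.

# Hypothetical driver loop; 'data' and the baseline values are placeholders.
scenarios = [{'parameters': {'P_POLICY_FIXED_BASELINE': b}, 'mode': 'feasibility'}
             for b in (0.9, 1.0, 1.1)]
for options in scenarios:
    run_scenario(data, options, 'output')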
Example #2
def get_bau_average_price(data_dir, scenario_dir, tmp_dir):
    """
    Run model with arbitrarily high baseline to obtain business-as-usual price

    Parameters
    ----------
    data_dir : str
        Root directory containing files used to construct model cases

    scenario_dir : str
        Directory containing k-means clustered scenarios

    tmp_dir : str
        Directory used to cache results (e.g. admittance matrix) when constructing case files
    """

    # Model parameters
    params = {
        'parameters': {
            'P_POLICY_FIXED_BASELINE': 1.5,
        },
        'mode': 'feasibility',
    }

    # Construct data used for case
    data = get_case_data(data_dir,
                         scenario_dir,
                         tmp_dir,
                         params,
                         use_cache=True)

    # Construct model and solve
    print('Running:', params)
    m = construct_model(data, use_pu=True)
    m = configure_model(m, params)
    m = solve_model(m)

    # Extract and save results
    solution = get_solution(m, params)

    # BAU price must be scaled by 100 if use_pu=True
    bau_price = solution['solution']['E_AVERAGE_ELECTRICITY_PRICE'] * 100

    return bau_price
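A hedged usage sketch (not from the source); the directory names are placeholders for the actual case-file layout.

# Placeholder directories; substitute the real data, scenario and cache paths.
bau_price = get_bau_average_price('data', 'scenarios', 'tmp')
print('BAU average price:', bau_price)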
Example #3
                                              distort_rgb=False,
                                              flip_left_right=False,
                                              random_rotation=False,
                                              repeat=False,
                                              width=opts.width,
                                              height=opts.height)

num_test_files = len(os.listdir(opts.test_image_dir))
num_test_steps = num_test_files // opts.batch_size
print("num_test_files=", num_test_files, "batch_size=", opts.batch_size,
      "=> num_test_steps=", num_test_steps)

# training model.
train_model = model.construct_model(
    width=opts.patch_width_height or opts.width,
    height=opts.patch_width_height or opts.height,
    use_skip_connections=not opts.no_use_skip_connections,
    base_filter_size=opts.base_filter_size,
    use_batch_norm=not opts.no_use_batch_norm)
model.compile_model(train_model,
                    learning_rate=opts.learning_rate,
                    pos_weight=opts.pos_weight)
print("TRAIN MODEL")
train_model.summary()  # summary() prints the layer table itself and returns None

# test model.
test_model = model.construct_model(
    width=opts.width,
    height=opts.height,
    use_skip_connections=not opts.no_use_skip_connections,
    base_filter_size=opts.base_filter_size,
    use_batch_norm=not opts.no_use_batch_norm)
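Since train_model and test_model share the same layers and differ only in input size, a common follow-up (an assumption, not shown in this excerpt, valid when the network is fully convolutional) is to copy the trained weights into the test model before evaluation:

# Assumed weight transfer from the patch-sized training model to the
# full-resolution test model.
test_model.set_weights(train_model.get_weights())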
Example #4
bs = 81

train_dt = tf.data.Dataset.from_tensor_slices(
    ((weather[train_index], countyID[train_index]),
     rice[train_index])).repeat().shuffle(10000).batch(bs)
valid_dt = tf.data.Dataset.from_tensor_slices(
    ((weather[valid_index], countyID[valid_index]),
     rice[valid_index])).repeat().batch(bs)
all_dt = tf.data.Dataset.from_tensor_slices(
    ((weather[all_index], countyID[all_index]),
     rice[all_index])).repeat().shuffle(10000).batch(bs)

from model import construct_model

merge_model = construct_model()

cbk = EarlyStopping(monitor='val_mean_squared_error',
                    min_delta=0,
                    patience=100,
                    verbose=1,
                    mode='min',
                    baseline=None,
                    restore_best_weights=True)

model_trained = merge_model.fit(train_dt,
                                epochs=500,
                                steps_per_epoch=len(train_index) // bs,
                                validation_data=valid_dt,
                                validation_steps=1,
                                callbacks=[cbk])
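Because the datasets call .repeat(), Keras needs an explicit number of batches per pass, which is why steps_per_epoch and validation_steps are set above. A hedged evaluation sketch along the same lines (not in the original excerpt):

# Evaluation sketch; the step counts mirror the steps_per_epoch logic above.
val_metrics = merge_model.evaluate(valid_dt, steps=max(1, len(valid_index) // bs))
all_metrics = merge_model.evaluate(all_dt, steps=len(all_index) // bs)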
Example #5
                                                 'train_info.csv'),
                                    index=False)
    pd.DataFrame(cv_info).to_csv(os.path.join(_folder_to_save, 'cv_info.csv'),
                                 index=False)

    print(len(train_info), len(cv_info))
    print('number of positive images %d, %d' %
          (sum([f[2] for f in train_info]), sum([f[2] for f in cv_info])))

    datareader = DataReader(train_info, transform=train_aug_ops)
    train_generator = tdata.DataLoader(datareader,
                                       batch_size=16,
                                       shuffle=True,
                                       pin_memory=True)

    model = construct_model()
    model.load_state_dict(
        torch.load(os.path.join(_folder_to_save, 'model_init_last.pth')))
    model.cuda()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=opt_lr,
                                 weight_decay=opt_wdecay)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                       0.95,
                                                       last_epoch=-1)

    cv_datareader = DataReader(cv_info, transform=constant_ops)
    cv_generator = tdata.DataLoader(cv_datareader,
                                    batch_size=16,
                                    shuffle=False,
Example #6
# Smooth / scale the data
x_train, x_val, y_train, y_val = DataPreprocess(x_train,
                                                x_val,
                                                y_train,
                                                y_val,
                                                rdm=args.seed,
                                                smooth=args.smooth,
                                                scale=args.scale)

current_time = str(datetime.datetime.now())[5:-7]
model_path = os.path.join(args.save_dir,
                          'RDNN_s{}_{}.h5'.format(args.seed, current_time))
# the current time is appended to the model path
print("Model Path: {}".format(model_path))

model = md.construct_model()
if args.train:

    if args.class_weights is None:
        class_weights = dict(
            enumerate(
                class_weight.compute_class_weight('balanced',
                                                  classes=np.unique(y_train),
                                                  y=y_train.reshape(-1))))
    else:
        class_weights = dict(enumerate(args.class_weights))

    print(class_weights)

    if args.metric == "f1score":
        model.compile(optimizer='adam',
Example #7
# embedding sentences
prem = embed(premise)
hypo = embed(hypothesis)

stop = timeit.default_timer()
elapsed_mins = (stop - start) / 60

print("Embeddings loaded in {:.2f} mins".format(elapsed_mins))

# construct model
pred = construct_model(prem,
                       hypo,
                       SENT_HIDDEN_SIZE,
                       ACTIVATION,
                       L2,
                       DP,
                       args.enable_projection,
                       projection_mode=args.projection_mode,
                       dot_align=args.dot_alignment,
                       sum_strategy=args.sum_mode,
                       dense_layers=args.dense_layers,
                       outlayer_count=2 if args.binary else 3)

model = Model(inputs=[premise, hypothesis], outputs=pred)
#model.compile(optimizer=OPTIMIZER, loss=['categorical_crossentropy','mean_squared_logarithmic_error'], metrics=['accuracy'])
model.compile(optimizer=OPTIMIZER,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()

print("Training")
Example #8
                    type=str,
                    default=None,
                    help='if set, load weights from this model file')

opts = parser.parse_args()
print(opts)

u.ensure_dir_exists("runs/%s" % opts.run)

triplet_selector = triplet_selection.TripletSelection(
    opts.img_dir, opts.negative_frame_range, opts.negative_selection_mode)

examples = data.a_p_n_iterator(opts.batch_size, triplet_selector)

model, inputs, loss_fn = m.construct_model(opts.embedding_dim,
                                           opts.model_input,
                                           opts.learning_rate, opts.margin)


class NumZeroLossCB(callbacks.Callback):
    def __init__(self, batch_size=16):
        self.batch_size = batch_size
        self.sess = tf.Session()
        self.examples = (data.a_p_n_iterator(
            self.batch_size,
            triplet_selector).make_one_shot_iterator().get_next())
        self.summary_writer = tf.summary.FileWriter("tb/%s" % opts.run)

    def on_epoch_end(self, epoch, logs):
        # TODO: how do we just use the model's iterator here? don't care
        # that it "wastes" examples doing this eval, it's all generator
Example #9
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--embedding-dim',
                    type=int,
                    default=64,
                    help="image embedding dim")
parser.add_argument('--model-input',
                    type=str,
                    default='model',
                    help='where to load model from')
parser.add_argument('--embeddings-output',
                    type=str,
                    default='embeddings.npy',
                    help='where to write embedding npy')
opts = parser.parse_args()

model = m.construct_model(embedding_dim=opts.embedding_dim)
model.load_weights(opts.model_input)

filenames = list(u.slurp_manifest(opts.manifest))


def filenames_generator():
    for filename in filenames:
        yield filename


def decode_img(img_name):
    img = tf.image.decode_jpeg(tf.read_file(img_name))  # (H, W, 3)   uint8
    img = tf.cast(img, tf.float32)
    img = (img / 127.5) - 1.0  # (-1, 1)
    return img
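A minimal sketch (an assumption, not part of the original script) of how filenames_generator and decode_img could be wired into a tf.data pipeline for the embedding pass; the resize step is hypothetical, added only so variably sized JPEGs can be batched.

# Hypothetical input pipeline feeding the loaded embedding model.
dataset = tf.data.Dataset.from_generator(filenames_generator,
                                         output_types=tf.string)
dataset = dataset.map(decode_img)
dataset = dataset.map(lambda img: tf.image.resize_images(img, (64, 64)))
dataset = dataset.batch(opts.batch_size)
# Batches from this dataset would then be embedded with model.predict and
# the result written to opts.embeddings_output.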
Example #10
f.close()

f = open(test_path)
test_nums = len(f.readlines())  # number of test samples
f.close()

if __name__ == '__main__':
    # Select which GPU to use
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.90  # use at most 90% of GPU memory
    K.set_session(tf.Session(config=config))

    # Build the model
    extract_feature_model, sr_model = construct_model(FLAGS.num_classes)

    # Create the optimizer
    opt = Adam(lr=FLAGS.learn_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    sr_model.compile(loss='categorical_crossentropy',
                     optimizer=opt,
                     metrics=['accuracy'])

    # Learning-rate decay
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=10,
                                  min_lr=1e-8,
                                  mode="min",
                                  cooldown=10,
                                  verbose=1)
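A hedged sketch (not in the original excerpt) of how the compiled sr_model and the ReduceLROnPlateau callback might be combined; train_generator, val_generator, train_nums, FLAGS.batch_size and FLAGS.epochs are assumptions, not taken from the source.

    # Hypothetical training call; generators, train_nums and the FLAGS fields
    # used here are placeholders.
    sr_model.fit_generator(train_generator,
                           steps_per_epoch=train_nums // FLAGS.batch_size,
                           validation_data=val_generator,
                           validation_steps=test_nums // FLAGS.batch_size,
                           epochs=FLAGS.epochs,
                           callbacks=[reduce_lr])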