Example #1
def min_rxn_binary(model, v, niter=5, fixiter=True):
    base.add_objective_constraint(model)
    x = model.addMVar(v.shape[0], name="x", vtype=GRB.BINARY)
    U = np.copy(v.ub)
    L = np.copy(v.lb)
    model.setObjective(sum(x), sense=GRB.MINIMIZE)
    X = np.zeros((x.shape[0], niter))
    sol = np.ones(x.shape)
    for i in range(niter):
        # indicator-style coupling: v[j] can be nonzero only when x[j] == 1
        model.addConstr(v <= np.diag(U) @ x)
        model.addConstr(v >= np.diag(L) @ x)
        model.optimize()
        sol = binarize(np.copy(x.X), tol=1e-16)
        X[:, i] = x.X
        print(card(sol))
        # shrink the bounds of reactions that were switched off in this round
        U[sol == 0] = U[sol == 0] / 10.0
        L[sol == 0] = L[sol == 0] / 10.0
        if fixiter:
            zeros, indeter, ones = splits(X[:, i])
            x[zeros].lb = 0
            x[zeros].ub = 0
            x[ones].lb = 1
            x[ones].ub = 1
            x[indeter].lb = 0
            x[indeter].ub = 1
    return model, x, X
Example #2
def is_valid_solution(model, x, sol):
    prev_ub = np.copy(x.UB)
    x.UB = binarize(sol)
    model.optimize()
    status = model.status
    x.UB = prev_ub
    model.update()
    return status == GRB.OPTIMAL
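A minimal driver sketch combining Example #1 and Example #2; the Gurobi model and flux variable construction is assumed to happen elsewhere, and build_model() is a hypothetical placeholder rather than part of the original code:

# Hypothetical driver; build_model() is a placeholder for whatever constructs
# the Gurobi model and the flux MVar v.
model, v = build_model()
model, x, X = min_rxn_binary(model, v, niter=5)
candidate = X[:, -1]                      # binary indicators from the last iterate
if is_valid_solution(model, x, candidate):
    print("candidate support is feasible at the required objective level")
else:
    print("candidate support is infeasible; try more iterations")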
Example #3
def main(complete, timeout, output, solver):
    if complete:
        m, n = list(map(int, input('Enter m n: ').split()))
        r = m * n
        e = [(i, j) for i in range(m) for j in range(n)]
    else:
        m, n, r = list(map(int, input('Enter m n r: ').split()))
        e = []
        print('Enter edges, one by line:')
        for _ in range(r):
            e.append(tuple(map(int, input().split())))
    # solve problem
    try:
        instance = optimize(get_data(m, n, e), timeout, output, solver)
        optimum = int(value(instance.OBJ)) // 1000
        crossings = int(value(instance.OBJ)) % 1000
        print(f'Graph is {optimum}-gap planar with {crossings} crossings')
        print('Order for the top vertices')
        print(' '.join(map(str, get_positions(m, instance.pu))))
        print('Order for the bottom vertices')
        print(' '.join(map(str, get_positions(n, instance.pv))))
        print('Edge gaps')
        for x in e:
            print(
                x, 'takes gap for crossing with',
                ' '.join([
                        str(y) for y in e
                        if int(value(instance.x[x, y]) + 0.5) == 1
                    ])
            )
    except NonOptimalException:
        print("failed")
Example #4
def min_rxn_l1_abs(model, v, niter=5, gamma=0.001):
    base.add_objective_constraint(model)
    vf = model.addMVar(v.shape[0], name="vf", lb=0.0, ub=v.ub)
    vr = model.addMVar(v.shape[0], name="vr", lb=0.0, ub=-v.lb)
    model.addConstr(v == vf - vr)
    V = np.ones((v.shape[0], niter))
    wf = np.ones(vf.shape)
    wr = np.ones(vr.shape)
    print("starting iterations")
    for i in range(niter):
        model.setObjective(wf @ vf + wr @ vr, sense=GRB.MINIMIZE)
        model.optimize()
        V[:, i] = v.X
        # reweight: small fluxes get large weights and are pushed toward zero
        wf = 1.0 / (gamma + np.abs(vf.X))
        wr = 1.0 / (gamma + np.abs(vr.X))
    return model, v, V
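The weight update 1.0 / (gamma + |v|) makes small fluxes expensive in the next solve, so repeated iterations approximate minimizing the number of reactions that carry flux. A hedged usage sketch, assuming model and v come from the same Gurobi setup as Example #1 (the 1e-9 cutoff is only illustrative):

model, v, V = min_rxn_l1_abs(model, v, niter=5, gamma=0.001)
active = np.abs(V[:, -1]) > 1e-9   # assumed tolerance for "carries flux"
print(active.sum(), "reactions remain active after reweighting")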
Example #5
def runmodel():
    headers = request.headers
    auth = headers.get("apikey")
    if auth == config['app']['apikey']:
        data = request.get_json()
        result = optimize(data["data"], config)
        return jsonify(result), 200
    else:
        return jsonify({"message": "ERROR: Unauthorized"}), 401
Example #6
def add_fva_bounds(model, v, frac=1.0):
    model.Params.OutputFlag = 0

    if frac is not None:
        base.add_objective_constraint(model, frac)

    orig_obj = model.getObjective()
    orig_sense = model.ModelSense

    for i in range(v.shape[0]):
        model.setObjective(v[i] + 0, GRB.MINIMIZE)
        model.optimize()
        v[i].LB = v[i].X

        model.setObjective(v[i] + 0, GRB.MAXIMIZE)
        model.optimize()
        v[i].UB = v[i].X

    model.setObjective(orig_obj, orig_sense)
    model.update()
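A brief usage sketch for the FVA-style bound tightening above, again assuming a Gurobi model and flux MVar v as in Example #1; frac=0.9 and the zero tolerance are illustrative choices:

add_fva_bounds(model, v, frac=0.9)                      # keep at least 90% of the optimum
blocked = (np.abs(v.UB) < 1e-9) & (np.abs(v.LB) < 1e-9)  # assumed tolerance for "no flux"
print(blocked.sum(), "reactions are blocked under these bounds")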
Example #7
def main():
    data1 = pd.read_csv('Admission_Predict.csv')
    data2 = pd.read_csv('Admission_Predict_Ver1.1.csv')

    data = pd.concat([data1, data2]).drop('Serial No.', axis=1)
    target = data['Chance of Admit ']
    features = data.drop('Chance of Admit ', axis=1)

    X_train, X_test, y_train, y_test = split(features, target)

    gb = model.base_model()
    pred = gb.predict(X_test)

    print("Our baseline model without tuning gave an R2 of {}".format(
        model.performance_metric(y_test, pred)))

    # Tune 1
    model.optimize(X_train,
                   y_train,
                   regressor=gb,
                   parameter={'n_estimators': [1, 2, 4, 8, 16, 32, 64, 100]})
    visuals.plot_optimization(
        regressor=gb,
        parameter={'n_estimators': [1, 2, 4, 8, 16, 32, 64, 100]})
    gb = gb.set_params(n_estimators=50)

    # Tune 2
    model.optimize(X_train,
                   y_train,
                   regressor=gb,
                   parameter={
                       'max_depth': range(2, 12, 2),
                       'min_samples_split': range(6, 18, 2)
                   })
    visuals.plot_optimization(regressor=gb,
                              parameter={'max_depth': range(2, 20, 2)})
    gb = gb.set_params(max_depth=10)

    # Tune 3
    model.optimize(X_train,
                   y_train,
                   regressor=gb,
                   parameter={
                       'min_samples_split': range(6, 18, 2),
                       'min_samples_leaf': [3, 5, 7, 9, 12, 15]
                   })
    visuals.plot_optimization(regressor=gb,
                              parameter={'min_samples_split': range(6, 18, 2)})
    gb = gb.set_params(min_samples_split=6)
    visuals.plot_optimization(
        regressor=gb, parameter={'min_samples_leaf': [3, 5, 7, 9, 12, 15]})
    gb = gb.set_params(min_samples_leaf=3)

    # Tune 4
    model.optimize(X_train,
                   y_train,
                   regressor=gb,
                   parameter={'max_features': range(1, 8)})
    visuals.plot_optimization(regressor=gb,
                              parameter={'max_features': range(1, 8)})
    gb = gb.set_params(max_features=3)

    # Tune 5
    model.optimize(X_train,
                   y_train,
                   regressor=gb,
                   parameter={'subsample': [0.7, 0.75, 0.8, 0.85, 0.9, 0.95]})
    visuals.plot_optimization(
        regressor=gb,
        parameter={'subsample': [0.7, 0.75, 0.8, 0.85, 0.9, 0.95]})
    gb = gb.set_params(subsample=0.95)

    # Tune 6
    model.robust_model(gb,
                       rates=[0.05, 0.01, 0.005, 0.005],
                       trees=[100, 500, 1000, 1500])
    gb = gb.set_params(learning_rate=0.005, n_estimators=1500)

    gb = gb.fit(X_train, y_train)

    pred = gb.predict(X_test)
    print('R-squared score of {}'.format(
        np.round(model.performance_metric(y_test, pred), decimals=5)))

    visuals.feature_importance(features.columns, gb.feature_importances_)
Example #8
def train():
    
    train_dir='/home/daijiaming/Galaxy/data3/trainset/'
    train_label_dir='/home/daijiaming/Galaxy/data3/train_label.csv'
    test_dir='/home/daijiaming/Galaxy/data3/testset/'
    test_label_dir='/home/daijiaming/Galaxy/data3/test_label.csv'
    
    train_log_dir = '/home/daijiaming/Galaxy/Dieleman/logs/train/'
    val_log_dir = '/home/daijiaming/Galaxy/Dieleman/logs/val/'
    
    tra_image_batch, tra_label_batch, tra_galaxyid_batch = input_data.read_galaxy11(
        data_dir=train_dir, label_dir=train_label_dir, batch_size=BATCH_SIZE)
    val_image_batch, val_label_batch, val_galaxyid_batch = input_data.read_galaxy11_test(
        data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)

    x = tf.placeholder(tf.float32, [BATCH_SIZE, 64, 64, 3])
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, N_CLASSES])
    keep_prob=tf.placeholder(tf.float32)
                            
    logits,fc_output = model.inference(x, BATCH_SIZE, N_CLASSES,keep_prob)
       
    loss =  model.loss(logits, y_)

    accuracy =  model.accuracy(logits, y_)
    
    my_global_step = tf.Variable(0, name='global_step', trainable=False) 
    train_op =  model.optimize(loss, learning_rate, my_global_step)
    
   
    
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()   
       
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)    
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)
    
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, loss, accuracy, summary_op],
                feed_dict={x: tra_images, y_: tra_labels, keep_prob: 0.5})
            
            if step % 50 == 0 or (step + 1) == MAX_STEP:                 
                print ('Step: %d, tra_loss: %.4f, tra_accuracy: %.2f%%' % (step, tra_loss, tra_acc))
#                summary_str = sess.run(summary_op,feed_dict={x:tra_images, y_:tra_labels})
                tra_summary_writer.add_summary(summary_str, step)
                
            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                val_loss, val_acc, summary_str = sess.run(
                    [loss, accuracy, summary_op],
                    feed_dict={x: val_images, y_: val_labels, keep_prob: 1.0})
                print('**  Step %d, test_loss = %.4f, test_accuracy = %.2f%%  **' %(step, val_loss, val_acc))
#                summary_str = sess.run([summary_op],feed_dict={x:val_images,y_:val_labels})
                val_summary_writer.add_summary(summary_str, step)
                    
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
Example #9
def main(_):
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)

    seed = int(time.time())

    with tf.Graph().as_default():
        train_loader = DataLoader(FLAGS.train_set_path, FLAGS.batch_size)
        valid_loader = DataLoader(FLAGS.valid_set_path,
                                  num_valid_samples=FLAGS.num_valid_examples)
        train_images, train_labels = train_loader.load_batch()
        valid_images, valid_labels = valid_loader.load_batch()

        tf.set_random_seed(seed)
        global_step = tf.contrib.framework.get_or_create_global_step()

        with tf.variable_scope("svhn"):
            logits = model.inference(train_images)
            loss = model.loss(logits, train_labels)
            train_op = model.optimize(loss, global_step, FLAGS.learning_rate,
                                      FLAGS.batch_size)

        with tf.variable_scope("svhn", reuse=True):
            logits = model.inference(valid_images)
            top_k_op = tf.nn.in_top_k(logits, valid_labels, 1)

        scaffold = tf.train.Scaffold(init_op=tf.global_variables_initializer())
        variable_averages = tf.train.ExponentialMovingAverage(
            model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore, max_to_keep=1)

        class _LoggerHook(tf.train.SessionRunHook):
            """Logs loss and runtime."""
            def after_create_session(self, session, coord):
                train_loader.load(session)
                valid_loader.load(session)

            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def end(self, session):
                train_loader.close(session)
                valid_loader.close(session)

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    if self._step % FLAGS.eval_frequency == 0:
                        precision = model.evaluate(run_context.session,
                                                   top_k_op,
                                                   FLAGS.num_valid_examples)
                        format_str = (
                            '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                            'sec/batch), precision = %.2f%%')
                        print(format_str %
                              (datetime.now(), self._step, loss_value,
                               examples_per_sec, sec_per_batch, precision))
                    else:
                        format_str = (
                            '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                            'sec/batch)')
                        print(format_str %
                              (datetime.now(), self._step, loss_value,
                               examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                scaffold=scaffold,
                checkpoint_dir=FLAGS.log_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                    tf.train.NanTensorHook(loss),
                    _LoggerHook(),
                    tf.train.CheckpointSaverHook(checkpoint_dir=FLAGS.log_dir,
                                                 saver=saver,
                                                 save_steps=100)
                ],
                config=tf.ConfigProto(log_device_placement=FLAGS.
                                      log_device_placement)) as mon_sess:
            while not mon_sess.should_stop():
                mon_sess.run(train_op)
Example #10
# Build the P-Net and get its outputs
image, label, bbox_target, landmark_target, cls_loss, bbox_loss, landmark_loss, accuracy, conv4_1, conv4_2, conv4_3 = P_Net(
)

# Build the training loss
total_loss = radio_cls_loss * cls_loss + radio_bbox_loss * bbox_loss + radio_landmark_loss * landmark_loss
avg_total_loss = fluid.layers.mean(total_loss)

# Count how many samples there are in total
label_file = '../data/12/all_data.label'
with open(label_file, 'r') as f:
    num = len(f.readlines())

# Define the optimization method
_, learning_rate = optimize(avg_total_loss, num, cfg.BATCH_SIZE)

# Get the custom training data reader
train_reader = myreader.train_reader('../data/12/all_data',
                                     label_file,
                                     batch_size=cfg.BATCH_SIZE)

# Create an executor that runs on the GPU if configured, otherwise on the CPU
place = fluid.CUDAPlace(0) if cfg.USE_GPU else fluid.CPUPlace()
exe = fluid.Executor(place)
# Initialize parameters
exe.run(fluid.default_startup_program())

# Values to fetch during training
fetch_list = [
    avg_total_loss, accuracy, learning_rate, cls_loss, bbox_loss, landmark_loss
]
Example #11
SOLUTION_DIR = '../soln/'

configs = car_configs.gen()
configs = [[4, 5, 5, 2, 1, 1]]  # override the generated configs with a single hand-picked one

n_configs = len(configs)
print('Evaluating {} car configs...'.format(n_configs))

best_time = float('inf')
for i, config in enumerate(configs[:]):
    all_instr = []

    total_time = 0
    for track, track_w in zip(tracks, tracks_w):
        instr, time, gas, tire, v = model.optimize(config, track)
        total_time += track_w * time
        all_instr.append(instr)

    if total_time < best_time:
        best_time = total_time
        best_config = config
        best_instr = all_instr

    print(config, total_time)

    if ((i + 1) % 10 == 0):
        print('{}/{} done.'.format(i + 1, n_configs))
        print('Best: {} with time {}'.format(best_config, best_time))

    #print(config)
Example #12
# fba simulation
# -----------------------------------------------------------------------------

model = read_sbml_model(tiny_sbml)
print(model)


# Iterate through the objects in the model
print("Reactions")
print("---------")
for x in model.reactions:
    print("%s : %s [%s<->%s]" % (x.id, x.reaction, x.lower_bound, x.upper_bound))

print("")
print("Metabolites")
print("-----------")
for x in model.metabolites:
    print('%9s (%s) : %s, %s, %s' % (x.id, x.compartment, x.formula, x.charge,  x.annotation))

print("")
print("Genes")
print("-----")
for x in model.genes:
    associated_ids = (i.id for i in x.reactions)
    print("%s is associated with reactions: %s" %
          (x.id, "{" + ", ".join(associated_ids) + "}"))


solution = model.optimize()
print(solution)
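In recent cobrapy versions the Solution object returned by model.optimize() also exposes the optimum and the per-reaction fluxes directly, which is often more useful than printing the whole object:

print("status:", solution.status)
print("objective value:", solution.objective_value)
print(solution.fluxes.head())   # pandas Series of fluxes indexed by reaction id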
Example #13
batch_size = 384

# Build the O-Net and get its outputs
image, label, bbox_target, landmark_target, cls_loss, bbox_loss, landmark_loss, accuracy, cls_prob, bbox_pred, landmark_pred = O_Net()

# Build the training loss
total_loss = radio_cls_loss * cls_loss + radio_bbox_loss * bbox_loss + radio_landmark_loss * landmark_loss
avg_total_loss = fluid.layers.mean(total_loss)

# Count how many samples there are in total
label_file = '../data/48/all_data.label'
with open(label_file, 'r') as f:
    num = len(f.readlines())

# Define the optimization method
_, learning_rate = optimize(avg_total_loss, num, batch_size)

# Get the custom training data reader
train_reader = myreader.train_reader('../data/48/all_data', label_file, batch_size=cfg.BATCH_SIZE)

# Create an executor that runs on the GPU if configured, otherwise on the CPU
place = fluid.CUDAPlace(0) if cfg.USE_GPU else fluid.CPUPlace()
exe = fluid.Executor(place)
# Initialize parameters
exe.run(fluid.default_startup_program())

# Values to fetch during training
fetch_list = [avg_total_loss, accuracy, learning_rate, cls_loss, bbox_loss, landmark_loss]

# Train
for pass_id in range(100):
Example #14
def train():
    global_step = tf.train.get_or_create_global_step()
    with tf.device('/cpu:0'):
        picList, classList, attriList = inputs.GetFileNameList(DATA_TRAIN)  # lists of image paths, class labels, and attribute labels
        imageBatch, classBatch, attriBatch = inputs.GetBatchFromFile_Train(picList, classList, attriList, BATCH_SIZE, EPOCH)

    # Build a graph that predicts attributes from the input image batch.
    predAttriBatch = model.plain_model(imageBatch)  # predicted attributes

    # Calculate loss.
    loss = model.loss(attriBatch, predAttriBatch)

    # Get the training op for optimizing loss
    TrainOp = model.optimize(loss, LearningRate, global_step)

    # Create a saver.
    saver = tf.train.Saver(tf.global_variables())

    # Start running operations on the Graph. allow_soft_placement must be set to
    # True to build towers on GPU, as some of the ops do not have GPU implementations.
    config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)

    with tf.Session(config=config) as sess:
        print("Initializing Variables...")
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        min_loss = float('Inf')
        max_acc = 0
        try:
            print("Starting To Train...")
            for step in range(MAX_STEPS):
                sess.run(TrainOp)

                if step % 200 == 0:
                    loss_value, classValue, predAttriValue = sess.run([loss, classBatch, predAttriBatch])
                    print("classValue: ", classValue)
                    print("predAttriValue: ", predAttriValue)
                    accuracy = model.accuracy(classValue, predAttriValue)
                    if min_loss > loss_value:
                        min_loss = loss_value
                    if max_acc < accuracy:
                        max_acc = accuracy

                    with open("../Records/train_records.txt", "a") as file:
                        format_str = "%d\t%.6f\t%.6f\t%.6f\t%.6f\n"
                        file.write(format_str % (
                            step, loss_value, min_loss, accuracy, max_acc))

                    print("%s ---- step %d:" % (datetime.now(), step))
                    print("\tLOSS = %.6f\tmin_Loss = %.6f" % (loss_value, min_loss))
                    print("\tACC = %.4f\tmax_Acc = %.4f" % (accuracy, max_acc))

                if (step % 500 == 0):
                    checkpoint_path = os.path.join(LogDir, 'model.ckpt')
                    print("saving checkpoint into %s-%d" % (checkpoint_path, step))
                    saver.save(sess, checkpoint_path, global_step=step)
                
        except Exception as e:
            print("exception: ", e)
            coord.request_stop(e)

        finally:
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)