Example #1
def run():

    input_txt = __prompt()
    input_cmd = __easy_parse(input_txt)
    #input_cmd = __parse(input_txt)

    model.run(input_cmd)
Example #2
def test_recording():
    with open(log_path, "a") as file:
        file.write("recording activate rate is as follows.\n")

    data = from_pickle()

    for folder in data.keys():
        recording_sum = 0
        activate_sum = 0
        for recording in data[folder]:
            if len(recording) < 8000:
                continue

            recording_sum += 1
            section_num = math.ceil(len(recording) / 8000)
            for i in range(section_num):
                if i == section_num - 1:
                    result = run(recording[-8000:])
                else:
                    result = run(recording[i * 8000:(i + 1) * 8000])
                if result == 0:
                    activate_sum += 1
                    break

        with open(log_path, "a") as file:
            file.write(folder + ": " + str(activate_sum) + "/" +
                       str(recording_sum) + " = " +
                       str(activate_sum / recording_sum) + "\n")
Example #3
def main():
    """
    main
    """
    models = {
        "resnet": model.ResNet50Benchmark,
        "mobilenet": model.ResNet50Benchmark,
        "vgg": model.ResNet50Benchmark,
        "googlenet": model.ResNet50Benchmark,
        "shufflenet": model.ResNet50Benchmark,
        "MobileNet_SSD": model.ResNet50Benchmark,
        "deeplab": model.ResNet50Benchmark,
        "rcnn": model.RcnnBenchmark,
        "yolo": model.YoloBenchmark,
        "transformer": model.TransformerBenchmark,
        "bert": model.BertBenchmark
    }
    args = parse_args()
    model = models.get(args.model)()
    model.set_config(use_gpu=args.device == 'gpu',
                     model_dir=args.model_dir,
                     model_filename=args.model_filename,
                     params_filename=args.params_filename,
                     use_tensorrt=args.use_tensorrt,
                     use_anakin=args.use_anakin,
                     model_precision=args.model_precision)
    tensor = model.load_data(args.filename)
    warmup = args.warmup
    repeat = args.repeat
    model.run(tensor, warmup, repeat)
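The parse_args helper used above is not included in the snippet. A minimal hypothetical reconstruction is sketched below; only the attribute names are taken from main(), while the flag spellings and defaults are assumptions:

import argparse

def parse_args():
    # Hypothetical sketch: flag names mirror the attributes read in main() above.
    parser = argparse.ArgumentParser(description="Run an inference benchmark")
    parser.add_argument("--model", default="resnet", help="one of the keys of the models dict")
    parser.add_argument("--device", default="gpu", choices=["cpu", "gpu"])
    parser.add_argument("--model_dir", default="")
    parser.add_argument("--model_filename", default="__model__")
    parser.add_argument("--params_filename", default="__params__")
    parser.add_argument("--use_tensorrt", action="store_true")
    parser.add_argument("--use_anakin", action="store_true")
    parser.add_argument("--model_precision", default="fp32")
    parser.add_argument("--filename", default="", help="input data file passed to load_data")
    parser.add_argument("--warmup", type=int, default=10)
    parser.add_argument("--repeat", type=int, default=100)
    return parser.parse_args()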
Example #4
File: main.py Project: xuepo99/zero-shot
def main():
    # read config
    config = ConfigParser.ConfigParser()
    config.read('zero-shot.cfg')
    dataset = config.get('data', 'dataset')
    method = config.get('model', 'method')

    # load data
    data = load_data.run(dataset, method, config)

    # train model
    model.run(data, method, config)
Example #5
def test_frame():
    with open(log_path, "a") as file:
        file.write("frame activate rate is as follows.\n")

    data = from_pickle()

    for folder in data.keys():
        activate_sum = 0
        frame_sum = 0

        for recording in data[folder]:
            sr = 8000
            th = 0.05
            r = 0.1
            s = max(0, (recording < th).argmin() - int(r * sr))
            t = min(-1, -((recording < th)[::-1].argmin()) + int(r * sr))
            recording = recording[s:t]

            if folder == "./recordings/5cm-pen-re/":
                window = 1000
                bar_l = 1
                bar_h = 1
                section = 0
                for i in range(int(len(recording) / window)):
                    sum = np.sum(recording[i * window:(i + 1) * window]**2)
                    if sum > bar_h:
                        break
                    if sum <= bar_l:
                        section = i
                recording = recording[(section + 1) * window:]

            if len(recording) < 8000:
                continue

            section_num = math.ceil(len(recording) / 8000)
            for i in range(section_num):
                frame_sum += 1
                if i == section_num - 1:
                    result = run(recording[-8000:])
                else:
                    result = run(recording[i * 8000:(i + 1) * 8000])
                if result == 0:
                    activate_sum += 1

        with open(log_path, "a") as file:
            file.write(folder + ": " + str(activate_sum) + "/" +
                       str(frame_sum) + " = " + str(activate_sum / frame_sum) +
                       "\n")
Example #6
    def run_game(self, u):
        seed = seeds[u]
        acts = actions[u]
        print('running', u, seed, acts)

        return model.run(seed, *acts)
Example #7
def selectAudio(name,path):

    result = model.run(path+"/"+name)

    query = "SELECT phrase_id, upload_at FROM audio WHERE name = '"+name+"' LIMIT 1;"
    data = ""
    cursor = conn.run_query(query,data)
    phrase_id = ""
    upload_at = ""
    for (phrase, date_audio) in cursor:

        phrase_id = str(phrase)
        upload_at = date_audio

    #insertPhrases(users_id,'quz','esp','txt',result,result,0,upload_at)

    query = "SELECT user_id, text_source, text_target FROM phrase  WHERE phrase_id = '"+phrase_id+"' LIMIT 1;"
    cursor = conn.run_query(query,data)
    text_source = ""
    text_target = ""
    user_id = ""
    for (u, q, s) in cursor:
        user_id = u
        text_source = q
        text_target = s

   #data = {'user_id': user_id ,'upload_at': upload_at, 'name':name,'text_source':text_source,'text_target':text_target}
    data = {'user_id': "1" ,'upload_at': "22-02-18", 'name':"hispana.wav",'text_source':result,'text_target':result}
    final = json.dumps(data, ensure_ascii=False).encode('utf8')
    return final
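The queries above are built by string concatenation, which breaks on names containing quotes and is open to SQL injection. Assuming conn.run_query forwards its data argument to a DB-API cursor.execute (the wrapper itself is not shown, so this is an assumption), a parameterized variant would look like this sketch:

# Sketch only: assumes conn.run_query(query, data) passes data through to cursor.execute.
query = "SELECT phrase_id, upload_at FROM audio WHERE name = %s LIMIT 1;"
cursor = conn.run_query(query, (name,))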
Example #8
def run_for_plotting():

	# run params
	N = 200
	time = 0.02
	speed = 2.0 # motion distance is equal to speed (we assume time = 1)

	# the controller ( P, I, D, dt )
	#pid_controller = controller.PID_Controller(0.4, 0.001, 0.01, time)
	# why does this work so well? pid_controller = controller.PID_Controller(0.0, 0.0, 0.01, time)
	#pid_controller = controller.PID_Controller(0.2, 0.001, 0.01, time)
	pid_controller = controller.PID_Controller(0.5, 0.038, 0.0108, time)

	#build the track
	mytrack = track.get_track_1()
	line_seg_track = track.convert_to_line_segs(mytrack)
	track.write_to_file(mytrack, 'generated_files/track.csv')

	# create the car
	mycar = model.build_default_car(time, speed) 
	mycar.write_to_file('generated_files/car_data.csv')


	# Main program
	pos_out, sense_out  = model.run(N, time, pid_controller, line_seg_track, mycar, speed)

	f_Pos = open('generated_files/run_output.csv','w')
	for pos in pos_out:
			f_Pos.write("%s\n" % (pos))
	f_Pos.close()

	f_Sense = open('generated_files/sense_output.csv','w')
	for pos in sense_out:
			f_Sense.write("%s\n" % (pos))
	f_Sense.close()
Example #9
def uploads():
    if request.method == 'GET':
        return render_template('upload.html')
    else:
        if 'content' not in request.files:
            flash('No content image selected')
            return redirect(request.url)
        content = request.files['content']

        if 'style' not in request.files:
            flash('No style image selected')
            return redirect(request.url)
        style = request.files['style']

        if content.filename == '' or style.filename == '':
            flash('Error, please, try again')
            return redirect(request.url)

        if style and allowed_file(style.filename) and \
            content and allowed_file(content.filename):

            style_path = os.path.join(app.config['UPLOAD_PATH'], 'style.jpeg')
            style.save(style_path)
            content_path = os.path.join(app.config['UPLOAD_PATH'],
                                        'content.jpeg')
            content.save(content_path)
            return run()
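The allowed_file helper called above is not part of the snippet. A common Flask-style implementation is sketched below; the ALLOWED_EXTENSIONS whitelist is an assumption:

ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}

def allowed_file(filename):
    # Accept only filenames whose extension is in the whitelist.
    return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS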
Example #10
def main(src):
    if not src:
        err("Argument list empty")
        quit()
    for path in src:
        with open(path, 'r') as f:
            txt = f.read()
        print(run(txt))
Example #11
def main(args):
    batch_size = args.batch_size
    n_epoch = args.n_epoch
    n_step = args.n_step
    keep_prob = args.keep_prob
    n_hidden_units = args.n_hidden_units
    embedding_size = args.embedding_size
    initial_learning_rate = args.initial_learning_rate
    final_learning_rate = args.final_learning_rate
    assert args.embedding in ['random', 'one_hot'], 'Unrecognized embedding method'
    random_embedding = True if args.embedding == 'random' else False
    assert args.granularity in ['single', 'multi'], 'Unrecognized granularity'
    multi_granined = True if args.granularity == 'multi' else False
    assert args.granularity_out in ['single', 'multi'], 'Unrecognized granularity'
    multi_granined_out = True if args.granularity_out == 'multi' else False
    assert args.train_mode in ['step', 'epoch'], 'Unrecognized training mode (please specify step or epoch)'
    
    PrepData = IO()
    train_response_list, question_list = PrepData.load_model_input(fname_TrainData, sep=',')
    test_response_list, question_list = PrepData.load_model_input(fname_TestData, sep=',', question_list=question_list)
    id_encoding = PrepData.question_id_1hotencoding(question_list)
    category_map_dict = PrepData.load_category_map(fname_MapData, sep=',')
    category_encoding = PrepData.category_id_1hotencoding(category_map_dict)

    skill2category_map = PrepData.skill_idx_2_category_idx(category_map_dict, category_encoding)
    n_id = len(id_encoding)
    n_categories = len(category_encoding)

    # print {skill: category_encoding[category_map_dict[skill]] for skill in category_map_dict.keys()}
    # print skill2category_map

    train_batches = BatchGenerator(train_response_list, batch_size, id_encoding, n_id, n_id, n_categories, random_embedding=random_embedding, skill_to_category_dict=skill2category_map, multi_granined_out=multi_granined_out)
    test_batches = BatchGenerator(test_response_list, batch_size, id_encoding, n_id, n_id, n_categories, random_embedding=random_embedding, skill_to_category_dict=skill2category_map, multi_granined_out=multi_granined_out)

    sess = tf.Session()
    run(sess, train_batches, test_batches,
        option=args.train_mode, record_performance=True,
        model_saved_path=os.path.join(MODEL_FOLDER, MODEL_FILE),
        n_step=n_step, random_embedding=random_embedding, multi_granined=multi_granined,
        n_categories=n_categories, out_folder=DATA_FOLDER, out_file=fname_Result,
        keep_prob=keep_prob, n_hidden_units=n_hidden_units, embedding_size=embedding_size,
        initial_learning_rate=initial_learning_rate, final_learning_rate=final_learning_rate,
        multi_granined_out=multi_granined_out)
    # tensorboard --logdir logs
    writer = tf.summary.FileWriter(MODEL_LOG_FOLDER, sess.graph) # http://localhost:6006/#graphs on mac
    sess.close()
Example #12
def do_magic(image_path):
    '''Do your image mining here and return some JSON data to the
    Android app using the 'result' variable.
    '''
    # result = {'status': 'done'} # as an example
    result = run(image_path)

    # Uncomment this line if you want to remove the image after dealing with it.
    #os.remove(image_path)
    return jsonify(result)
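do_magic returns a Flask jsonify response, so it is presumably called from a route that saves an uploaded image and hands over its path. A minimal hypothetical wiring is sketched below; the route name, form field, and upload directory are invented for illustration:

import os
from flask import Flask, request

app = Flask(__name__)
UPLOAD_DIR = "/tmp/uploads"  # invented for illustration

@app.route("/analyze", methods=["POST"])
def analyze():
    # Save the uploaded image and run the mining code on it.
    image = request.files["image"]
    image_path = os.path.join(UPLOAD_DIR, image.filename)
    image.save(image_path)
    return do_magic(image_path)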
Example #13
    def swi_demand_json(self, model, index, **params):
        p = {}
        for k, v in params.items():
            if k.startswith('key_'):
                p[k[4:]] = float(v)

        import gbm
        model = getattr(gbm.models, model)()

        r = model.run(**p)
        data = model.plot_nvd3(r)

        return json.dumps(dict(main=data, index=int(index)))
Example #14
def main():
    assert len(factor_defns) == len(
        experiments[0])  # Same order - must use appropriate design

    experiment_number = 0
    run_number = 0

    with open("results.csv", "w") as f:
        for experiment in experiments:

            factors = {
                k: factor_defns[k][v]
                for k, v in zip(sorted(factor_defns.keys()), experiment)
            }

            for environment_specification in get_environment_specification():

                environments = generate_environment(environment_specification,
                                                    factors['BY_LINEAGE'])
                write_environment(run_number, environments)

                for repeat in range(0, N_REPEATS):

                    print("{0}/{1}".format(
                        run_number + 1,
                        N_REPEATS * N_ENVIRONMENTS * len(experiments)))
                    results = model.run(
                        factors=factors,
                        population=init_population(POPULATION_SIZE),
                        generations=GENERATIONS,
                        population_limit=10,
                        environment=environments)

                    if run_number == 0:
                        f.write(
                            format_results_header(
                                construct_line(run_number, experiment_number,
                                               environment_specification,
                                               results[0], factors)) + "\n")

                    f.write("\n".join([
                        format_results_line(
                            construct_line(run_number, experiment_number,
                                           environment_specification, result,
                                           factors)) for result in results
                    ]))
                    f.write("\n")

                    run_number += 1

            experiment_number += 1
Example #15
    def __init__(self, start, n, runs):
        # compute initial value
        self.path = []
        self.cost = []
        val = [
            model.run(start[0], start[1])['total_cost'] for j in range(runs)
        ]
        best = np.mean(val)
        self.path = start
        self.cost.append(best)

        # complete n steps search
        for i in range(1, n):
            next = self.neighbor(start)
            val = [
                model.run(next[0], next[1])['total_cost'] for j in range(runs)
            ]
            val = np.mean(val)
            # only select neighbor if solution improves
            if val < best:
                best = val
                self.path = np.vstack([self.path, next])
                self.cost.append(val)
                start = next
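The search loop above relies on a neighbor() method that is not shown. One possible implementation is sketched below, assuming the state is a pair of numeric parameters perturbed by Gaussian noise (the step size is invented for illustration):

    def neighbor(self, point, step=0.1):
        # Hypothetical proposal move: jitter both coordinates with small Gaussian noise.
        return np.array([point[0] + np.random.normal(0, step),
                         point[1] + np.random.normal(0, step)])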
Example #16
	def post(self):
		try: 
			formData = request.json
			formData = pd.json_normalize(formData)
			model.addData(formData)
			response = jsonify({
				"statusCode": 200,
				"status": "Prediction made",
				"result": model.run()
			})
			response.headers.add('Access-Control-Allow-Origin', '*')
			return response
		except Exception as error:
			return jsonify({
				"statusCode": 500,
				"status": "Could not make prediction",
				"error": str(error)
			})
Example #17
def main(targets):

    if 'clean' in targets:
        shutil.rmtree('result', ignore_errors=True)

    if 'test-project' in targets:
        cfg = load_params(TEST_PROJECT_PARAMS)
        print("Running small set of testing data")
        try:
            [_ for _ in run(**cfg)]
            print("Result saved to result folder")
        except:
            print('Unknown error occurred. This should not happen')

    if 'test-data' in targets:
        cfg = load_params(TEST_DATA_PARAMS)
        try:
            [_ for _ in download_apks(**cfg)]
        except:
            print('Unknown error occurred. This should not happen')
        finally:
            print(cfg["download_amount"] * len(cfg["catagory"]),
                  'apps downloaded, decompiled and ingested to ',
                  cfg['apk_path'])
            print('Finished Data ingestion')

    if 'train-data' in targets:
        cfg = load_params(TRAIN_DATA_PARAMS)
        try:
            [_ for _ in download_apks(**cfg)]
        except TypeError:
            print('TypeError raised, but it does not affect the process')
        except:
            print('Unknown error occurred. This should not happen')
        finally:
            print(cfg["download_amount"] * len(cfg["catagory"]),
                  'apps downloaded ingested to ', cfg['apk_path'])
            print('Finished Data ingestion')

    if 'process' in targets:
        print('Converting apk to smali')
        subprocess.check_output(["./temp.sh", 'data'])
        print('Finished converting apk to smali')
Example #18
    def Run(self, request, context):
        # Run detection
        buff = BytesIO(request.input)
        img = Image.open(buff)
        result = model.run(img)

        # Send response
        num_detections = result['num_detections']
        resp = bytes2boxes_pb2.Bytes2BoxesReply()
        for i in range(num_detections):
            detect = resp.detections.add()
            detected_class = result['detection_classes'][i]
            detect.category = model.category_index[detected_class]['name']
            detect.score = result['detection_scores'][i]
            detected_box = result['detection_boxes'][i]
            detect.x = detected_box[1]
            detect.y = detected_box[0]
            detect.width = detected_box[3] - detect.x
            detect.height = detected_box[2] - detect.y

        return resp
Example #19
def main(parameters_file=None):
    """Produce data"""

    param = parameters.load(parameters_file)

    if param.running_mode == 'unique':
        seed = np.random.randint(2**32)
        bkp = model.run((seed, param))
        file_name = bkp.save()
        print("Data have been saved using file name: '{}'.".format(file_name))

        try:
            analysis.separate.pos_firmA_over_pos_firmB(file_name)
        except _tkinter.TclError:
            print("Figures can not be produced if there is no graphic server.")

    else:
        print('Parameters are: ', param.dict())

        pool = mlt.Pool()

        backups = []

        seeds = np.random.randint(2**32, size=param.n_simulations)

        for bkp in tqdm.tqdm(pool.imap_unordered(
                model.run, zip(seeds, (param, ) * param.n_simulations)),
                             total=param.n_simulations):
            backups.append(bkp)

        pool_backup = backup.PoolBackup(parameters=param, backups=backups)

        file_name = pool_backup.save()

        print("Data have been saved using file name: '{}'.".format(file_name))

        try:
            analysis.pool.distance_over_fov(file_name=file_name)
        except _tkinter.TclError:
            print("Figures can not be produced if there is no graphic server.")
Example #20
    def Run(self, request, context):
        """Runs the detection and returns an Image2FacesReply."""
        # Run detection.
        buff = BytesIO(request.input)
        img = Image.open(buff)
        if img.mode != 'RGB':
            img = img.convert('RGB')
        img_array = np.array(img, np.uint8)
        result = model.run(img_array)
        print('found %s faces' % len(result))
        # Build Response proto.
        resp = image2faces_pb2.Image2FacesReply()
        for f in result:
            x1, y1, x2, y2, features = f
            face = resp.faces.add()
            face.x1 = x1
            face.y1 = y1
            face.x2 = x2
            face.y2 = y2
            face.facenet = base64.b64encode(features).decode('utf-8')
            # face.facenet.extend(facenet)
        # Send Response.
        return resp
Example #21
def slim_run(objective,
             querry,
             *file_path,
             Relax=False,
             Limit=None,
             New_Pamar=None,
             New_Setting=None):
    """ Minimalistic run function, solve the model with given inputs
        and returns a name-value dictionnary for each item in the querry list
        Saves all results in a hdf5 file to the specified path.
    """
    # Solve the model
    m = model.run(objective,
                  Limit=Limit,
                  Relax=Relax,
                  New_Pamar=New_Pamar,
                  New_Setting=New_Setting)

    # Query variable values
    Querried_results = {}
    for q in querry:
        Querried_results[q] = m.getVarByName(q).x

    if file_path:
        # Save results to path
        var_results, var_meta = results.get_all(m, 1e-6, Days, Hours, Periods)
        results.save_df_to_hdf5(var_results, var_meta, file_path[0], Days,
                                Hours, Periods)

    # file_name = 'run_info.txt'
    # with open(os.path.join(file_path[0], file_name), 'w') as f:
    #     for v in m.getVars():
    #         if v.x > 2000:
    #             print('Warning high variable value', file=f)
    #             print('{}: {:.0f}'.format(v.VarName, v.x), file=f)

    return m, Querried_results
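A hypothetical call to slim_run is shown below; the objective name, queried variable names, and output path are placeholders, since the real names depend on the underlying optimization model:

# Sketch only: 'totex' and the variable names are invented for illustration.
m, values = slim_run('totex', ['unit_size[PV]', 'unit_size[BAT]'],
                     'results/run_1.h5', Relax=False)
print(values)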
Example #22
    def Run(self, request, context):
        # Run detection
        buff = BytesIO(request.image)
        img = Image.open(buff)
        img_arr = np.array(img)
        img_arr_format = img_arr[:, :, ::-1].copy()
        results = model.run(img_arr_format)

        # Send response
        resp = image2pose_pb2.Image2PoseReply()
        image_w, image_h = img.size
        centers = {}
        for human in results:
            pose = resp.result.poses.add()
            pose.id = -1
            pose.type = pose_pb2.POSE_TYPE_OPENPOSE_FULL
            for i in range(model.NUM_JOINTS):
                pt = pose.points.add()
                if i not in human.body_parts.keys():
                    continue
                pt.x = human.body_parts[i].x
                pt.y = human.body_parts[i].y

        return resp
Example #23
            action='store',
            type=int,
            dest='latent_factors',
            default=10,
            help='Define number of latent_factors')

    parser.add_argument('-method',
            action='store',
            type=str,
            dest='method',
            default='als',
            help='The method used to train the model')

    parser.add_argument('-to',
            dest='to',
            nargs='+',
            default=['*****@*****.**'],
            help='Mail address')

    parser.add_argument('-path',
            dest='path',
            type=str,
            default='./',
            help='The path to the dataset directory')
    return parser.parse_args()

if __name__ == "__main__":
    conf = get_config(init())
    # countdown(100, conf)
    model.run(conf)
Example #24
import model
import difference_schme

print('RUN!')

model.run()
difference_schme.run()
Example #25
def run():
    model.load(args.model_path, args.checkpoint_path)
    rgb = misc.imread(args.input)
    print(rgb)
    result = model.run(rgb)
    misc.imsave('model_test_result.png', result)
Example #26
from model import run, run2, run3

run('train.csv', 'test.csv', 'out4.csv')
Example #27
File: opt.py Project: julfy/ml-tloe
# from hyperopt import fmin, tpe, hp
# space = [hp.quniform('lr', 0.00001, 1, 0.00001),
#          hp.quniform('bs', 100, 10000, 100),
#          hp.quniform('fhl', 10, 200, 10),
#          hp.quniform('shl', 10, 200, 10)]

numpy.set_printoptions(threshold='nan')
numpy.set_printoptions(precision=2)

transform.transform_data ("/home/bogdan/work/repos/ml-tloe/serps/results/*", 'expanded', 10000)

data = load.read_data_sets ('expanded/*',0.3,0.1, num = 00000);

model.create ( H1=1, H2=50 )

# model.train (data, learning_rate=0.001, batch_size=100000, lmbda=0, ermul=10000, restore=False)

model.run(data)

################################################################################
# def cost ((lr, bs, fhl, shl)):
#     return model.train_once (data, lr, int(bs), 0, int(fhl), int(shl), 31, 1) #(data, 0.003, 5000, 0, 150, 50, 31, 1)

# best = fmin(cost,
#             space,
#             algo=tpe.suggest,
#             max_evals=1000)

# print best
Example #28
#!/usr/bin/env python
# coding: utf-8

import tensorflow as tf
import os
import image
import model
import ssl

content_path = 'james.jpg'
style_paths = ['Vincent_van_Gogh_69.jpg', 'Vincent_van_Gogh_604.jpg']

# image.py uses the original code from GitHub; I will update it again in a few days.

if __name__ == "__main__":
    best, best_loss = model.run(content_path, style_paths, iteration=200)
    image.saveimg(best, 'best.jpg')
Example #29
def on_button_pressed_run():
    print(len(image_filenames))

    # Fatal user mistakes (the filter won't run without these)
    if not image_filenames:
        view.messagebox.showwarning(
            title="Missing Images",
            message=
            "Before running the filter, please click \"Load Images\" and select the images on which to run the filter"
        )
        return
    if not heatmap_filenames:
        view.messagebox.showwarning(
            title="Missing Heatmaps",
            message=
            "Before running the filter, please click \"Load Heatmaps\" and select the heatmaps on which to run the filter"
        )
        return
    if len(image_filenames) < len(heatmap_filenames):
        view.messagebox.showwarning(
            title="Input Mismatch: Too Many Heatmaps; Not Enough Images",
            message=
            f"You've selected {len(image_filenames)} image(s) and {len(heatmap_filenames)} heatmap(s). There should be exactly one heatmap per image. \nPlease check that you have selected all of the files that you need."
        )
        return
    if len(image_filenames) > len(heatmap_filenames):
        view.messagebox.showwarning(
            title="Input Mismatch: Too Many Images; Not Enough Heatmaps",
            message=
            f"You've selected {len(image_filenames)} image(s) and {len(heatmap_filenames)} heatmap(s). There should be exactly one heatmap per image. \nPlease check that you have selected all of the files that you need."
        )
        return
    if not save_directory:
        view.messagebox.showwarning(
            title="Missing Save Location",
            message=
            "Before running the filter, please click \"Choose Save Location\" and select a folder for the newly generated images to be saved to."
        )
        return

    # Possible user mistakes (the user may have made a mistake, or may be intending this behavior)
    if not active_filters:
        is_ok = view.messagebox.askokcancel(
            title="No Filters Were Selected",
            message=
            "No Filters Were Selected. The generated images will be the same as the input images.\nIf this is intentional, please press \"ok\" to continue."
        )
        if not is_ok:
            return

    # Finally, run the filter on all of the files
    view.change_state("disabled")
    for i in range(len(image_filenames)):
        model.run(image_filenames[i], heatmap_filenames[i], active_filters)
        filename_without_path = split(image_filenames[i])[1]
        path = save_directory + "/" + filename_without_path
        cv2.imwrite(filename=path, img=model.get_output_image())
        # # display
        # imshow("filtered", model.get_output_image())
        # waitKey(0) # wait for user input to show next image
    count = len(image_filenames)
    view.messagebox.showinfo(
        title="Filter complete!",
        message=str(count) +
        (" images have " if count != 1 else " image has ") +
        "been generated and saved to the folder:\n" + save_directory)
    view.change_state("normal")
Example #30
    if not args.scan_path:
        if cfg.scan_path:
            args.scan_path = cfg.scan_path
        else:
            print(
                'Path of folder to scan is not set.',
                '\nProvide path by running "run.py -s path" or put it into reminder.cfg "scan_path".'
            )
            exit(1)

    if not os.path.exists(args.scan_path):
        print('Can\'t find folder or file at provided path.')
        exit(1)

    if args.log_level:
        if args.log_level == 1:
            logging.basicConfig(level=logging.INFO, format='%(message)s')
        elif args.log_level > 1:
            logging.basicConfig(
                level=logging.DEBUG,
                format=
                '%(asctime)s,%(msecs)03d - %(funcName)-18s - %(message)s',
                datefmt='%X')

    scheduler = r_scheduler.RScheduler()
    interface = cli.CliInterface()

    model.run(args.scan_path, cfg)
    scheduler.run()
    interface.run()
Example #31
import model as model
import shutil
import os, time
dir_to_watch = '/data/vanguard_FAQ/watch_dir/'
while True:
    time.sleep(1)
    files_list = os.listdir(dir_to_watch)
    print("polling...")
    for fl in files_list:
        if fl.endswith('end'):
            data_id = fl.split('.')[0]
            print("training started for id "+data_id)
            data_file = dir_to_watch+data_id+".data"
            shutil.copyfile(data_file, '../data/train_data.txt')
            shutil.copyfile(data_file, '../data/valid_data.txt')
            shutil.copyfile(data_file, '../data/train2.txt')

            model.run()
            os.remove(dir_to_watch+fl)
            os.remove(data_file)
Example #32
    def run_game(self, u):
        # TODO: add command line argument to set this seed globally
        seed = uuid_package.UUID(u).int & 0x7fffffff
        acts = actions[u]

        return model.run(seed, *acts)
Example #33
def run(objective,
        Reload=False,
        Relax=False,
        Pareto=False,
        Limit=None,
        Plot=True,
        Summary=True,
        Save_txt=False,
        New_Pamar=None,
        New_Setting=None):
    """ Solve the optimization model once then store the result in hdf5 format.
        # TODO save inputs into the resut folder
    """
    # Option to generate a new input.h5 file
    if Reload:
        import write_inputs
        write_inputs.write_arrays('default', Cluster=True)

    start_solve = time.time()
    # Select objective and solve the model
    m = model.run(objective,
                  Relax=Relax,
                  Limit=Limit,
                  New_Pamar=New_Pamar,
                  New_Setting=New_Setting)

    end_solve = time.time()
    print('model solve time: ', end_solve - start_solve, 's')
    start_write = time.time()

    # Get results into python dictionaries
    Threshold = 1e-6
    var_results, var_meta = results.get_all(m, Threshold, Days, Hours, Periods)

    # Save all results to a hdf5 file
    file_name = 'results.h5'
    cd = results.make_path(objective=objective, Limit=Limit, Pareto=Pareto)
    path = os.path.join(cd, file_name)
    results.save_df_to_hdf5(var_results, var_meta, path, Days, Hours, Periods)

    # Save a txt file containing variable results close to the model limit
    if Save_txt:
        results.save_txt(m, cd)

    end_write = time.time()
    print('model write time: ', end_write - start_write, 's')

    # Plot graphs and save xls files of results
    start_plot = time.time()
    if Plot:
        plot.all_fig(path, save_fig=True)
    plot.units_and_resources(path, fig_path=None)
    if Summary:
        results.summary(path, save=True)
    end_plot = time.time()

    # Save info about the run
    file_name = 'run_info.txt'
    info = {
        0: f'Objective:     {objective}',
        1: f'Limit:         {Limit}',
        2: f'Solve Time:    {end_solve - start_solve}',
        3: f'Write Time:    {end_write - start_write}',
        4: f'Plot Time:     {end_plot - start_plot}'
    }

    with open(os.path.join(cd, file_name), 'w') as f:
        for k in info.keys():
            print(info[k], file=f)
            # for v in m.getVars():
            #     if v.x > 2000:
            #         print('Warning high variable value', file=f)
            #         print('{}: {:.0f}'.format(v.VarName, v.x), file=f)
    return path
Example #34
def run(parameters, multi=True):
    return model.run(multi=multi, **parameters)
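A hypothetical call to this thin wrapper, assuming model.run accepts keyword arguments; the parameter names below are invented for illustration:

# Hypothetical parameters; the real keyword names depend on model.run's signature.
params = {"n_agents": 100, "n_steps": 50}
result = run(params, multi=False)  # expands to model.run(multi=False, n_agents=100, n_steps=50)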
Example #35
        if args.color == 'original':
            raise AssertionError()

    if args.mode != 'artist_style':

        if args.artist == 'artist1':
            style_path = artist1_style_path
        elif args.artist == 'artist2':
            style_path = artist2_style_path
        elif args.artist == 'artist3':
            style_path = artist3_style_path

        best, best_loss = model.run(content_path,
                                    style_path,
                                    args.algorithm,
                                    args.mode,
                                    args.color,
                                    args.artist,
                                    iteration=1000)
    else:

        if args.artist == 'artist1':
            style_path_arr = artist1_style_path_arr
        elif args.artist == 'artist2':
            style_path_arr = artist2_style_path_arr
        elif args.artist == 'artist3':
            style_path_arr = artist3_style_path_arr

        best, best_loss = model.run(content_path,
                                    style_path_arr,
                                    args.algorithm,
Example #36
if lightOption == 'ACTUAL':
    globalsr = df['GHI']
    #extracts global solar radiation column from Excel Worksheet in W/m^2
    srInp = list(globalsr.values)
    #srInp = np.tile(srInp, 2)
else:
    srInp = 0

if rainOption == 'ACTUAL':
    rain = df['Rain']
    rInp = list(rain.values)
else:
    rInp = 0

#Run model
data = model.run(species, sType, capOn, weatherOption, lightOption, rainOption,
                 Duration, tempInp, qaInp, srInp, rInp, s0)

#Save data
data.to_pickle(resultsFile.get())

#Display output graphically
timestepM = 30
startDay = 0
endDay = Duration
dispDuration = endDay - startDay
daySteps = 60 // timestepM * 24
timevec = np.linspace(0, Duration, Duration * daySteps)
timevecHr = np.linspace(0, Duration * 24, Duration * daySteps)

# anp = plt.figure()
# plt.title("Carbon assimilation")
Example #37
    def run_game(self, u):
        seed = seeds[u]
        acts = actions[u]

        return model.run(seed, *acts)