Example #1
    def __init__(self, address):
        self.address = address
        self.loadList = []
        with open(self.configFile) as data:
            station_map = json.load(data)  # renamed from `map` to avoid shadowing the builtin
        common_house_address = station_map['mainStation'][address]['house']
        common_solar_address = station_map['mainStation'][address]['solar']
        self.loadList.append(Load(common_house_address, self.commonHouseProfit))
        self.loadList.append(Load(common_solar_address, self.commonSolarCost))
Example #2
def main(argv):
    message = OrderedDict({
        "Network": FLAGS.network,
        "data": FLAGS.data,
        "epoch": FLAGS.n_epoch,
        "batch_size": FLAGS.batch_size,
        "Optimizer": FLAGS.opt,
        "learning_rate": FLAGS.lr,
        "Denoising": FLAGS.denoise,
        "l2_norm": FLAGS.l2_norm,
        "Augmentation": FLAGS.aug
    })

    ## load dataset
    data = Load(FLAGS.data)

    ## setting models
    encode, decode = set_model(outdim=40, size=data.size, channel=data.channel)
    model = eval(FLAGS.network)(encode=encode,
                                decode=decode,
                                denoise=FLAGS.denoise,
                                size=data.size,
                                channel=data.channel,
                                name=FLAGS.network,
                                out_dim=data.output_dim,
                                lr=FLAGS.lr,
                                opt=FLAGS.opt,
                                trainable=True)

    # training
    trainer = AETrainer(FLAGS, message, data, model, FLAGS.network)
    trainer.train()
    return
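
One note on the OrderedDict above: on Python versions before 3.7 a dict literal does not preserve insertion order, so OrderedDict({...}) receives the pairs in arbitrary order. Passing a sequence of pairs is order-safe on any version; a minimal sketch with the first few keys:

from collections import OrderedDict

# The pairs reach OrderedDict in sequence regardless of Python version
message = OrderedDict([
    ("Network", FLAGS.network),
    ("data", FLAGS.data),
    ("epoch", FLAGS.n_epoch),
])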
Example #3
def objective(trial):
    tf.reset_default_graph()
    param = {
        'opt': trial.suggest_categorical('opt', ['SGD', 'Momentum', 'Adadelta', 'Adagrad', 'Adam', 'RMSProp']),
        'lr': trial.suggest_loguniform('lr', 8e-5, 8e-2),
        'batch_size': trial.suggest_categorical('batch_size', [64, 96, 128]),
        'aug': trial.suggest_categorical('aug', ['None', 'shift', 'mirror', 'rotate', 'shift_rotate', 'cutout']),
        'l2': trial.suggest_categorical('l2', ['True', 'False'])
    }

    FLAGS.aug = param['aug']
    FLAGS.l2_norm = param['l2']
    FLAGS.batch_size = param['batch_size']

    # prepare training
    ## load dataset
    data = Load(FLAGS.data)

    ## setting models
    model_set = set_model(data.output_dim)
    model = eval(FLAGS.network)(model=model_set, name=FLAGS.network, out_dim=data.output_dim, lr=param['lr'], opt=param['opt'], trainable=True)

    # training
    trainer = OptunaTrain(FLAGS=FLAGS, message=None, data=data, model=model, name='tuning')
    test_accuracy = trainer.train()
    # Optuna minimizes the objective by default, so return the negated accuracy
    return -test_accuracy
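
For context, a minimal sketch of how an objective like this is typically driven (the trial count is an illustrative assumption):

import optuna

study = optuna.create_study()           # direction='minimize' by default
study.optimize(objective, n_trials=50)  # each trial calls objective(trial)
print(study.best_params)                # best hyperparameters found

Since the objective returns -test_accuracy, minimizing it maximizes test accuracy.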
Example #4
    def test_clear(self):
        self.input_file = StringIO()
        self.input_file.write(
            'Text<value>.SF NS Text,13,-1,5,50,0,0,0,0,0<value>Text<value>48<value>218<value>4278190335<value>Group1\n'
        )
        self.input_file.write(
            'Ellipse<value>237<value>277<value>344<value>392<value>4278190335<value>Group1\n'
        )
        self.input_file.write(
            'Line<value>315:58;315:59;315:61;317:67;321:72;324:'
            '76;332:93;342:108;351:117;357:124;360:129;365:'
            '134;367:137;368:140;370:143;373:147;375:151;375:'
            '152;376:152;377:156;377:157;377:158<value>'
            '4294901760<value>Group2\n')
        self.input_file.write(
            'Rect<value>176<value>131<value>92<value>50<value>4294901760<value>Group2\n'
        )
        self.input_file.seek(0, 0)

        app = QApplication(sys.argv)
        mainwindow = MainWindow()
        load = Load(self.input_file, mainwindow.piirtoalusta)
        self.input_file.close()
        mainwindow.menubar.clear()
        items = mainwindow.piirtoalusta.scene.items()

        self.assertEqual(0, len(items), "Undo failed. Item still there")
Example #5
def etl_fact_macro_details(source_engine, target_engine):
    """fact_macro_details的etl主函数

    从235 tag_detail表etl到240 fact_macro_details表
    :param source_engine: 源数据库引擎
    :param target_engine: 目标数据库引擎
    """
    extract = Extract(source_engine, target_engine)
    transform = Transform()
    load = Load(target_engine)
    record = Record(table='fact_macro_detail', record_path='rec.cfg')

    start_params = record.get_record()
    divisions = extract.std_divisions()

    for i in range(start_params['rounds']):
        start_id = start_params['update_id'] + i * start_params['chunksize'] + 1
        end_id = start_params['update_id'] + (
            i + 1) * start_params['chunksize'] + 1

        tag_details = extract.tag_details(start_id, end_id)
        if len(tag_details) == 0:
            continue
        macro_details = transform.compile_datasets(tag_details, divisions)
        load.loading(macro_details)
        max_id = tag_details['id'].max()
        update_id = max_id if max_id else start_params['update_id']
        record.update_record(update_id)
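
The id window scanned in each round follows directly from start_params; a small worked example of the arithmetic with illustrative (assumed) values:

start_params = {'update_id': 1000, 'chunksize': 500, 'rounds': 3}
for i in range(start_params['rounds']):
    start_id = start_params['update_id'] + i * start_params['chunksize'] + 1
    end_id = start_params['update_id'] + (i + 1) * start_params['chunksize'] + 1
    print(start_id, end_id)  # 1001 1501, then 1501 2001, then 2001 2501

Each window's end_id equals the next window's start_id, so extract.tag_details presumably treats end_id as exclusive.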
Example #6
def run_etl(filename):
    logger.info("application ran")
    start = time.time()
    app = Extract()
    raw_data_list = app.get_data_from_bucket(filename) # extract output
    end_extract = time.time()
    extract_time = round(end_extract - start, 4)
    print(f"Extract time: {extract_time}")
    logger.info(f"Extract time: {extract_time}")
    apple = Transform()
    transformed_data, transformed_drink_menu_data = apple.transform_new_data(raw_data_list)  # transform the raw data; returns the cleaned data and the drink-menu data

    end_transform = time.time()
    transform_time = round(end_transform - end_extract, 4)
    logger.info(f"Transform time: {transform_time}")
    print(f"Transform time: {transform_time}")
    appley = Load()

    appley.save_transaction(transformed_data) # populate RDS instance with cleaned data.
    appley.save_drink_menu(transformed_drink_menu_data) # generate drinks menu
 
    end_load = time.time()
    load_time = round(end_load - end_transform, 4)
    logger.info(f"Loading time: {load_time}")
    total_time = extract_time + transform_time + load_time
    logger.info(f"total time: {total_time}")
    print(f"Load time: {load_time}\nTotal time: {total_time}")
Example #7
    def resume(self):
        self.resume_check = True
        self.main.activeObj = set()
        print("LOADING THE GAME")
        load_game = Load(self.main)
        load_game.load()
        print(self.resume_check)
        # need to get values for these to actually work
        self.main.color = load_game.getColor()
        self.main.numPlayers = load_game.getNumPlayers()
        self.main.activeObj = set()
        self.main.board = Board(self.main)
        self.main.game = Game(self.main)
        self.main.deck = Deck(self.main)
        self.main.deck.start_deck()
        load_game.set_values()
        self.main.game.playing.playerInfoList = self.main.game.playing.getPlayerInfoList(
            self.main.game.playerNum)
        self.main.game.playing.relaxedButton.visible = False
        # self.main.save = Save(self.main)
        # self.main.save.save()
        print("DONE LOADING")
        self.main.gameStarted = True
Example #8
def Load_Cfagent(defaults):
    with Load(defaults["load_name"], num=defaults['num']) as load:
        collector, env, mover, teleporter, CFagent = load.items(Collector, Game, Mover, Teleporter, CFAgent)
        buffer = ReplayBuffer(**defaults)
        CFbuffer = CFReplayBuffer(**defaults)

        with Save(env, collector, mover, teleporter, CFagent, **defaults) as save:
            intervention_idx, modified_board = teleporter.pre_process(env)
            dones = CFagent.pre_process(env)
            CF_dones, cfs = None, None
            CFagent.CF_count = 0
            for frame in loop(env, collector, save, teleporter):
                CFagent.counterfact(env, dones, teleporter, CF_dones, cfs)
                modified_board = teleporter.interveen(env.board, intervention_idx, modified_board)
                actions = mover(modified_board)
                observations, rewards, dones, info = env.step(actions)
                modified_board, modified_rewards, modified_dones, teleport_rewards, intervention_idx = teleporter.modify(observations, rewards, dones, info)
                buffer.teleporter_save_data(teleporter.boards, observations, teleporter.interventions, teleport_rewards, dones, intervention_idx)
                mover.learn(modified_board, actions, modified_rewards, modified_dones)
                board_before, board_after, intervention, tele_rewards, tele_dones = buffer.sample_data()
                teleporter.learn(board_after, intervention, tele_rewards, tele_dones, board_before)
                collector.collect([rewards, modified_rewards, teleport_rewards], [dones, modified_dones])
                CF_dones, cfs = CFagent.counterfact_check(dones, env, **defaults)
                CFbuffer.CF_save_data(CFagent.boards, observations, CFagent.counterfactuals, rewards, dones, CF_dones)
                CFboard, CFobs, cf, CFrewards, CFdones1 = CFbuffer.sample_data()
                CFagent.learn(CFobs, cf, CFrewards, CFdones1, CFboard)
Example #9
def etl_fact_draw_main(engine_source, engine_target, chunksize=5000, record_file='etl_fact_draw.record'):
    """ETL for the drawing fact table.

    :param engine_source: source database engine
    :param engine_target: target database engine
    :param chunksize: number of rows extracted per round
    :param record_file: file that records the last loaded id
    """
    extract = Extract(engine_source, chunksize, record_file)
    transform = Transform()
    load = Load(engine_target)
    # Extract the data
    df_industry,df_draw_gen = extract.extract_main()
    logging.info('Extract datasets completed.')

    for k, df_draw in enumerate(df_draw_gen, 1):
        logging.info('Round %d, from obs. %d to obs. %d, start.' %
                     (k, (k - 1) * chunksize, k * chunksize))
        # Clean and transform the data
        df_clean = transform.transform_main(df_industry, df_draw)
        logging.info('Round %d, data cleaning completed.' % k)

        try:
            load.load_main(df_clean)
            logging.info('Round %d, loaded %d obs. successfully.' % (k, len(df_clean)))
            with open(record_file,'w') as f:
                f.write(str(max(df_draw['id'])))
        except Exception as e:
            df_clean[['drawGuid', 'marketGuid']].to_csv('unsecceed_samples.csv', mode='a', index=False)
            logging.error('Round %d, %s' % (k, e))
            raise
Example #10
def run_etl(filename):
    logger.info("application ran")
    start = time.time()
    app = Extract()
    # Command to extract data from csv via s3 bucket:
    raw_data_list = app.get_data_from_bucket(filename)
    # Commands to load data from RDS:
    # raw_data_list = app.load_yesterdays_data() # extract output from yesterday
    # raw_data_list = app.load_all_data()  # extract output from all time
    end_extract = time.time()
    extract_time = round(end_extract - start, 4)
    print(f"Extract time: {extract_time}")
    logger.info(f"Extract time: {extract_time}")
    apple = Transform()
    transformed_data, new_drinks, new_locations, basket = apple.transform(raw_data_list)  # transform the raw data; returns the cleaned data plus drink, location, and basket data
    # transformed_data, basket = apple.transform(raw_data_list) # raw data into transform returns transformed data and drinks dic

    end_transform = time.time()
    transform_time = round(end_transform - end_extract, 4)
    logger.info(f"Transform time: {transform_time}")
    print(f"Transform time: {transform_time}")
    appley = Load()

    # appley.save_transaction(transformed_data) # populate RDS instance with cleaned data.
    # appley.save_drink_menu(new_drinks) # generate drinks menu
    # appley.save_location_menu(new_locations) # generate locations menu
    # appley.save_basket(basket) # generate the basket data


    end_load = time.time()
    load_time = round(end_load - end_transform, 4)
    logger.info(f"Loading time: {load_time}")
    total_time = extract_time + transform_time + load_time
    logger.info(f"total time: {total_time}")
    print(f"Load time: {load_time}\nTotal time: {total_time}")
Example #11
    def __init__(self, P_Builder):
        self.Builder = P_Builder
        self.Load = Load()
        self.File = File()
        self.Save = Save(self.File)
        self.Kanban = Kanban()
        self.Graphical_Kanban = None

        self.action_flag = None
        self.Temp_Widget_Reference = None
Example #12
def etl_fact_market(*args):
    """fact_market表主函数
    
    :param args: 按位参数engine_zone_macro,engine_draw,engine_target
    """
    # 初始化 extract,transform和load三个对象
    extract = Extract(engine_zone_macro, engine_draw, engine_target)
    transform = Transform()
    load = Load(engine_target)

    # Extract the markets that have already been through ETL
    done_market = extract.done_market()
    df_tag_counts = extract.tag_counts()
    df_industry = extract.industry()
    has_dealed = []

    for i, sample_tag_counts in df_tag_counts.iterrows():

        grandParentId = sample_tag_counts['grandParentId']
        if len(grandParentId) != 36:  # check that grandParentId is valid
            logging.warning('Round %d, %s is invalid, skipped.' %
                            (i, grandParentId))
            continue

        elif grandParentId in done_market:  # skip markets already processed by ETL
            logging.warning('Round %d, %s already processed' % (i, grandParentId))
            continue

        if grandParentId in has_dealed:
            logging.warning('Round %d, %s already processed' % (i, grandParentId))
            continue
        else:
            has_dealed.append(grandParentId)

        # Extract the data
        zone_grandparent = extract.zone_grandparent(grandParentId)
        if len(zone_grandparent) == 0:
            logging.warning('Round %d, has no draw samples' % i)
            continue
        rent = extract.rent_details(grandParentId)
        industry_tmp = df_industry[df_industry['grandParentId'] ==
                                   grandParentId]
        # Transform the data
        rent = transform.rent_calculate(rent)
        industry_dict = transform.reshape_industry(industry_tmp)
        # Combine the data
        clean = transform.compile_dfs(sample_tag_counts, rent, industry_dict,
                                      zone_grandparent)
        try:
            load.loading(clean)
            logging.info('Round %d, %s ETL succeeded' % (i, grandParentId))
        except Exception as e:
            logging.error('Round %d, %s' % (i, e))
Example #13
def etl_dimension_time(target_engine):
    """时间维度表主函数

    :param target_engine: 目标数据库引擎
    """
    extract = Extract()
    transform = Transform()
    load = Load(target_engine)

    full_time = extract.gen_full_time()
    time_table = transform.gen_date(full_time)
    load.loading(time_table)
Example #14
    def loadFile(self):
        input_file = None
        try:
            path = QFileDialog.getOpenFileName(self.piirtoalusta)[0]
            input_file = open(path)
        except OSError:
            print("Could not open {}".format(path))
        else:
            Load(input_file, self.piirtoalusta)
        finally:
            if input_file:
                input_file.close()
Example #15
def etl_demension_division(target_engine):
    """division表的etl主函数

    从统计局爬取的标准csv表中抽取数据,载入到数据仓库
    :param target_engine:目标数据库引擎
    """
    extract = Extract()
    transform = Transform()
    load = Load(target_engine)
    logging.info('Initialize three instances')

    division_datasets = extract.std_divisions()
    std_districts = transform.std_districts(division_datasets)
    load.loading(std_districts)
Example #16
    def test_save(self):
        incontent = 'Text<value>.SF NS Text,13,-1,5,50,0,0,0,0,0<value>Text<value>48<value>218<value>4278190335<value>Group1\n'
        self.input_file = StringIO()
        self.input_file.write(incontent)
        self.input_file.seek(0, 0)

        app = QApplication(sys.argv)
        mainwindow = MainWindow()
        load = Load(self.input_file, mainwindow.piirtoalusta)
        self.input_file.close()
        mainwindow.menubar.files.filename = '/Users/Tuomas/Python3/Y2-Piirustusohjelma/testi2.txt'
        outcontent = mainwindow.menubar.files.saveFile()

        self.assertEqual(incontent, outcontent,
                         "Loading data failed. Data does not match")
Example #17
def main():

    if len(sys.argv) != 3:
        print('Usage: python3 main.py <url> <name_database>')
        sys.exit(1)

    url = sys.argv[1]
    name_db = sys.argv[2]

    transformation = Transformation(url=url,
                                    output_path='databases/',
                                    name_db=name_db)
    transformation.transformation()

    load = Load(transformation.new_engine)
    load.load(output_path='excel/')
Example #18
    def __init__(self):
        self.b = Battery(constants.battery_capacity, constants.battery_max_charge,
                         constants.battery_max_discharge, constants.nbatt,
                         constants.nbatt_c, constants.nbatt_d,
                         constants.battery_cost, constants.life_time,
                         constants.round_trip)
        self.g = GasTurbine(constants.gas_turbine_max, constants.microgas_turbine_om_cost,
                            constants.fual_cost, constants.co2_coe, constants.co2_cost,
                            constants.so2_coe, constants.so2_cost,
                            constants.no_coe, constants.no_cost)
        self.l = Load(constants.shortage_cost)
        self.p = PV(constants.pv_max, constants.pv_om_cost)
        self.w = WindTurbine(constants.wind_turbine_max,
                             constants.wind_turbine_om_cost)
        self.m = MEMS(self.b, self.g, self.l, self.p, self.w)
        self.bm = BaseMEMS(self.b, self.g, self.l, self.p, self.w)

        self.l.set_forecast(constants.load_important_forecast,
                            constants.load_transferable_forecast)
        self.p.set_forecast([ir / 0.2 * 1000 for ir in constants.pv_forecast])
        self.w.set_forecast(
            [0.2 * wind_speed**3 for wind_speed in constants.wind_forecast])
Example #19
def etl_fact_market(source_engine, target_engine, rec_path):

    extract = Extract(source_engine, target_engine)
    transform = Transform()
    load = Load(target_engine)
    record = Record(rec_path)

    start_params = record.get_record()
    # Placeholder lists; in the full program these are populated from the source tables
    unique_marketguid = []
    done_market = []
    has_dealed = []

    for i, grandParentId in enumerate(unique_marketguid):

        if len(grandParentId) != 36:  # check that grandParentId is valid
            logging.error('Round %d, %s is not valid.' % (i, grandParentId))
            continue

        elif grandParentId in done_market:  # skip markets already processed by ETL
            logging.warning('Round %d, %s already processed' % (i, grandParentId))
            continue

        if grandParentId in has_dealed:
            logging.warning('Round %d, %s already processed' % (i, grandParentId))
            continue
        else:
            has_dealed.append(grandParentId)

        zone_grandparent = extract.zone_grandparent(grandParentId)
        if len(zone_grandparent) == 0:
            logging.warning('Round %d, has no draw samples' % i)
            continue

        rent = extract.rent_details(grandParentId)
        # `industry` and `sample_tag_counts` (below) come from outside this snippet
        industry_tmp = industry[industry['grandParentId'] == grandParentId]
        # Transform the data
        rent = transform.rent_calculate(rent)
        industry_dict = transform.reshape_industry(industry_tmp)
        # Combine the data
        clean = transform.compile_dfs(sample_tag_counts, rent, industry_dict,
                                      zone_grandparent)
        try:
            load.loading(clean)
        except Exception as e:
            logging.error('Round %d, %s' % (i, e))
Example #20
def test_graphTrain(
    name: str
) -> tuple[dict[LayerType, dict[frozenset[LayerType], float]],
           frozenset[LayerType]]:
    data = {}
    with Load(name, num=0) as load:
        env, dataN = load.items(Game, Data)
        for layer, states in dataN.data.items():
            data[layer] = {}
            for state in states:
                p = dataN.p(layer, state)
                if p != 0 and not any(
                        dataN.p(layer, small) != 0
                        for small in compress(state, inclusiv_self=False)):
                    data[layer][state] = p
                else:
                    data[layer][state] = 0
    return data, environments[env.level][2]
Example #21
    def __init__(self, h5n):
        self._h5n = h5n  # type: H5Nastran

        self.constraint = Constraint(self._h5n, self)
        # self.contact = Contact(self.h5n, self)
        self.coordinate_system = CoordinateSystem(self._h5n, self)
        self.design = Design(self._h5n, self)
        # self.domains = None
        self.dynamic = Dynamic(self._h5n, self)
        self.element = Element(self._h5n, self)
        # self.fatigue = None
        self.load = Load(self._h5n, self)
        self.material = Material(self._h5n, self)
        # self.matrix = None
        # self.modules = None
        self.node = Node(self._h5n, self)
        self.parameter = Parameter(self._h5n, self)
        # self.partition = None
        self.property = Property(self._h5n, self)
        self.table = Table(self._h5n, self)
Example #22
    def reset(self):
        self.__data = {}
        from os.path import expanduser
        idFileName = expanduser("~") + '/.hidevid'
        if os.path.exists(idFileName):
            # read back the persisted device id; 'with' closes the handle even if readline fails
            with open(idFileName, 'r') as idFileHandle:
                self.__data['id'] = idFileHandle.readline().strip()

        now = datetime.datetime.now()
        self.__data['seq'] = int(round(time.time() * 1000))
        self.__data['tsClient'] = now.isoformat()
        cinfo = cpuinfo.get_cpu_info()
        self.__data['cpu'] = format(cinfo['brand'])
        self.__data['cpuCount'] = cinfo['count']
        osinfo = OsInfo()
        oinfo = osinfo.getOsInfo(cinfo)
        self.__data['os'] = oinfo['os']
        self.__data['osDist'] = oinfo['dist']
        self.__data['osVersion'] = oinfo['version']
        self.__data['osArch'] = oinfo['arch']
        self.__data['osKernel'] = oinfo['kernel']
        self.__data['cpuTemp'] = cputemp.get_cpu_temp()
        l = Load()
        self.__data['cpuLoad'] = l.getCpuLoad()
        MemInfo = meminfo.getMemoryStatus()
        self.__data['memAvail'] = MemInfo['memAvail'] / 1024
        self.__data['memUsed'] = MemInfo['memUsed'] / 1024
        self.__data['swapAvail'] = MemInfo['swpAvail'] / 1024
        self.__data['swapUsed'] = MemInfo['swpUsed'] / 1024
        self.__data['storage'] = l.getStorageStatus()
        self.__data['network'] = netinfo.get_network_interfaces()
        CpuTimes = cputimes.get_cpu_times()
        self.__data['cpuUser'] = CpuTimes['user']
        self.__data['cpuSystem'] = CpuTimes['system']
        self.__data['cpuIdle'] = CpuTimes['idle']
        self.__data['ioWait'] = CpuTimes['iowt']
        self.__data['UpTime'] = CpuTimes['uptime']
Example #23
def main(args):
    print("------------Start Evaluation-----------")
    print("CheckPoint : {}".format(FLAGS.ckpt_dir))
    print("Network : {}".format(FLAGS.network))
    print("data : {}".format(FLAGS.data))
    print("---------------------------------------")

    # load dataset
    data = Load(FLAGS.data)
    batch_size = 100
    dataset = data.load(data.x_train,
                        data.y_train,
                        batch_size=batch_size,
                        is_training=True)
    iterator = dataset.make_initializable_iterator()
    inputs, labels = iterator.get_next()
    test_inputs = tf.random.uniform([batch_size * 3, FLAGS.z_dim], -1, +1)
    index = tile_index(batch_size * 3)

    model = eval(FLAGS.network)(z_dim=FLAGS.z_dim,
                                size=data.size,
                                channel=data.channel,
                                lr=0.0,
                                class_num=data.output_dim,
                                conditional=FLAGS.conditional,
                                opt=None,
                                trainable=False)

    D_logits, D_logits_ = model.inference(inputs, batch_size, labels)
    G = model.predict(test_inputs, batch_size * 3, index)

    tf.train.Saver()
    with tf.Session() as sess:
        utils = Utils(sess=sess)
        utils.initial()
        if utils.restore_model(FLAGS.ckpt_dir):
            image = sess.run(G)
            utils.gan_plot(image)
            return
        else:
            return
Example #24
    def order_cluster_by_load(self, cluster_list):
        # Sample salt output
        # {'dlceph01.drwg.local': '0.27 0.16 0.15 1/1200 26234'}

        # define grammar
        point = Literal('.')
        number = Word(nums)
        floatnumber = Combine(number + point + number)
        float_list = OneOrMore(floatnumber)

        results = self.salt_client.cmd(','.join(cluster_list),
                                       'cmd.run', ['cat /proc/loadavg'],
                                       expr_form='list')
        load_list = []
        self.logger.debug("Salt load return: {load}".format(load=results))

        for host in results:
            host_load = results[host]
            match = float_list.parseString(host_load)
            if match:
                one_min = match[0]
                five_min = match[1]
                fifteen_min = match[2]
                self.logger.debug(
                    "Adding Load({host}, {one_min}, {five_min}, {fifteen_min})".
                    format(host=host,
                           one_min=one_min,
                           five_min=five_min,
                           fifteen_min=fifteen_min))
                load_list.append(Load(host, one_min, five_min, fifteen_min))
            else:
                self.logger.error("Could not parse host load output")

        # Sort the list by fifteen min load
        load_list = sorted(load_list, key=lambda x: x.fifteen_min_load)
        for load in load_list:
            self.logger.debug("Sorted load list: " + str(load))

        return load_list
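
As a quick sanity check, the grammar above can be exercised standalone against the sample salt output shown in the comment (pyparsing only, no salt required):

from pyparsing import Combine, Literal, OneOrMore, Word, nums

point = Literal('.')
number = Word(nums)
floatnumber = Combine(number + point + number)
float_list = OneOrMore(floatnumber)

# Matching stops at '1/1200', which is not a float
print(float_list.parseString('0.27 0.16 0.15 1/1200 26234'))  # ['0.27', '0.16', '0.15']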
Example #25
def market_to_api2(source, target, record_file='api2.record'):
    """ETL main function for the api2 table in the anti_fraud database.

    :param source: source database engine
    :param target: target database engine
    :param record_file: file that records the last loaded id, defaults to 'api2.record'
    """
    # Initialize the objects
    extract = Extract(source, target, record_file)
    transform = Transform()
    load = Load(target, record_file)

    # Extract the data
    market_df = extract.market()
    draw_samples = extract.draw_samples()

    # Transform the data
    reshaped_market = transform.reshape_market(market_df)
    aggregated_samples = transform.aggregate_from_samples(draw_samples)
    api2_df = transform.compile_dfs(reshaped_market, aggregated_samples)

    # Load the data
    load.loading(api2_df)
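
A typical invocation would build two SQLAlchemy engines and pass them in; a minimal sketch (the connection strings are assumptions, not from the original):

from sqlalchemy import create_engine

source = create_engine('mysql+pymysql://user:password@source-host/anti_fraud')
target = create_engine('mysql+pymysql://user:password@target-host/anti_fraud')
market_to_api2(source, target, record_file='api2.record')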
Example #26
def main(args):
    print("------------Start Evaluation-----------")
    print("CheckPoint : {}".format(FLAGS.ckpt_dir))
    print("Network : {}".format(FLAGS.network))
    print("data : {}".format(FLAGS.data))
    print("---------------------------------------")

    # load dataset
    data = Load(FLAGS.data)
    batch_size = 3000
    iteration = data.x_test.shape[0] // batch_size
    test = data.load_test(data.x_test, data.y_test, batch_size)
    test_iter = test.make_initializable_iterator()
    inputs, labels = test_iter.get_next()

    model_set = set_model(data.output_dim)
    model = eval(FLAGS.network)(model=model_set, name=FLAGS.network, out_dim=data.output_dim, lr=0, opt=None, trainable=False)
    logits = model.inference(inputs)
    logits = tf.identity(logits, name="output_logits")
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.train.Saver()
    with tf.Session() as sess:
        sess.run([test_iter.initializer],
                 feed_dict={data.test_placeholder: data.x_test,
                            data.test_labels_placeholder: data.y_test})
        utils = Utils(sess=sess)
        if utils.restore_model(FLAGS.ckpt_dir):
            avg_accuracy = 0
            for i in range(iteration):
                test_accuracy = sess.run(accuracy)
                avg_accuracy += test_accuracy
                print("accuracy_{} : {}".format(i, test_accuracy))
            print("average_accuracy : {}".format(avg_accuracy / iteration))
            return
        else:
            return
Example #27
    parser.add_argument(
        '--logmissing',
        type=bool,  # caveat: bool('False') is True, so any non-empty string parses as True
        default=True,
        help="record info about missingness (set to False to disable)")

    return parser


parser = get_args()
args = parser.parse_args()


if __name__ == '__main__':
    try:
        load: Load = Load(args.dumpname, args.windows)

        missing_log: LogMissing = LogMissing(load)

        write: bool = bool(args.writecsv)

        logmissing: bool = bool(args.logmissing)

        if write:
            sys.stdout.write("writing...")
            load.export_csv(args.filename)
            sys.stdout.write("\r")
        else:
            print("not writing, but everything is fine. ")

        if logmissing:
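
One caveat with the parser above: argparse's type=bool does not do what it suggests, since bool('False') is True and any non-empty string parses as True. A common workaround is a string-to-bool converter; a sketch with an assumed str2bool helper:

import argparse

def str2bool(value: str) -> bool:
    # argparse passes the raw command-line string; interpret common spellings explicitly
    if value.lower() in ('true', '1', 'yes'):
        return True
    if value.lower() in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got %r" % value)

parser = argparse.ArgumentParser()
parser.add_argument('--logmissing', type=str2bool, default=True,
                    help="record info about missingness (set to False to disable)")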
Example #28
def what_to_run(self, wijk, algo, sec_algo, NoT, vis):
    ''' Determines which greedy variant to run '''
    wijk = "wijk" + wijk
    load = Load(wijk, wijk)

    print()
    print("INNITIAL UPPER & LOWERBOUND (before k-means and/or HAC)")
    print()
    print(wijk)
    Solution.bounds(Solution, load.batteries, load.houses)


    if algo == "random":
        random = Random_connect(load.houses, load.batteries)
    elif algo == "greedy_output":
        greedy = Greedy(load.houses, load.batteries, "output")
    elif algo == "greedy_distance":
        greedy = Greedy(load.houses, load.batteries, "distance")
    elif algo == "greedy_priority":
        greedy = Greedy(load.houses, load.batteries, "priority")
    elif algo == "k_means_output":
        k_means = K_means2(load.houses, load.batteries, "output", "")
        k_means.results()
    elif algo == "k_means_distance":
        k_means = K_means2(load.houses, load.batteries, "distance", "")
        k_means.results()
    elif algo == "k_means_priority":
        k_means = K_means2(load.houses, load.batteries, "priority", "")
        k_means.results()
    elif algo == "HAC":
        splitter = Cluster_merge(load.houses)
        sec_algo = "escape"
        grid_visualisatie = Grid_visualizer(splitter.houses, splitter.batteries, "gridview", "Hierarchical Agglomerative Clustering", splitter.solutions, splitter.best_solution)
    else:
        print("Error running algorithm")

    if sec_algo == "hill_climber" and algo == "greedy_output" or sec_algo == "hill_climber" and algo == "greedy_distance" or sec_algo == "hill_climber"  and algo == "greedy_priority":
        hill_climber = Hill_climber(greedy.houses, greedy.batteries, NoT, 1)
        if vis == 'True':
            grid_visualisatie = Grid_visualizer(hill_climber.houses, hill_climber.batteries, "gridview", "Greedy + Hill Climber")
    elif sec_algo == "hill_climber" and algo == "k_means_output" or sec_algo == "hill_climber" and algo == "k_means_distance" or sec_algo == "hill_climber" and algo == "k_means_priority":
        hill_climber = Hill_climber(k_means.houses, k_means.batteries, NoT, 1)
        if vis == 'True':
            grid_visualisatie = Grid_visualizer(hill_climber.houses, hill_climber.batteries, "gridview", "K-Means + Hill Climber")
    elif sec_algo == "simulated_annealing" and algo == "greedy_output" or sec_algo == "simulated_annealing" and algo == "greedy_distance" or sec_algo == "simulated_annealing" and algo == "greedy_priority":
        sim = Simulated_annealing(greedy.houses, greedy.batteries, NoT, 1)
        if vis == 'True':
            grid_visualisatie = Grid_visualizer(sim.houses, sim.batteries, "gridview", "Greedy + Simulated Annealing")
    elif sec_algo == "simulated_annealing" and algo == "k_means_output" or sec_algo == "simulated_annealing" and algo == "k_means_distance" or sec_algo == "simulated_annealing" and algo == "k_means_priority":
        sim = Simulated_annealing(k_means.houses, k_means.batteries, NoT, 1)
        if vis == 'True':
            grid_visualisatie = Grid_visualizer(sim.houses, sim.batteries, "gridview", "K-Means + Simulated Annealing")
    elif sec_algo == "simulated_annealing" and algo == "HAC":
        sim = Simulated_annealing(splitter.houses, splitter.batteries, NoT, 1)
        if vis == 'True':
            grid_visualisatie = Grid_visualizer(splitter.houses, splitter.batteries, "gridview", "Hierarchical Agglomerative Clustering", splitter.solutions, splitter.best_solution)
    elif sec_algo == "hill_climber" and algo == "HAC":
        sim = Hill_climber(splitter.houses, splitter.batteries, NoT, 1)
        if vis == 'True':
            grid_visualisatie = Grid_visualizer(splitter.houses, splitter.batteries, "gridview", "Hierarchical Agglomerative Clustering", splitter.solutions, splitter.best_solution)
    elif sec_algo == "escape":
        pass

    else:
        print("Error running secondary algorithm")
Example #29
    def utility(self, state, player):
        """ This return a value between -infinity and infinity based on how good is the state for the player"""

        player_type = player.get_type()
        player_color = player.get_color()

        if player_color is Piece.WHITE:
            if state.is_check_for_enemy(Piece.BLACK):
                if state.is_check_mate_for_enemy(Piece.BLACK):
                    return 99999999
                else:
                    return 99999999 - 100000
            if state.is_check_for_enemy(Piece.WHITE):
                if state.is_check_mate_for_enemy(Piece.WHITE):
                    return -99999999
                else:
                    return -99999999 + 100000
        elif player_color is Piece.BLACK:
            if state.is_check_for_enemy(Piece.WHITE):
                if state.is_check_mate_for_enemy(Piece.WHITE):
                    return 99999999
                else:
                    return 99999999 - 100000
            if state.is_check_for_enemy(Piece.BLACK):
                if state.is_check_mate_for_enemy(Piece.BLACK):
                    return -99999999
                else:
                    return -99999999 + 100000

        if player_type is Player.IDIOT:
            return 0

        elif player_type is Player.FIGHTER:

            if state.can_kill(player_color):
                state_quality = 50000
            else:
                state_quality = 0

            for row in state.get_board():
                for pos in row:
                    if pos is not None:
                        if pos.get_color() is player_color:
                            state_quality += Piece.VALUES[pos.get_type()]
                        else:
                            state_quality -= Piece.VALUES[pos.get_type()]
            return state_quality

        elif player_type is Player.SMARTEST or player_type is Player.HUMAN:
            db_access = Load()
            can_kill = state.can_kill(player_color)
            can_be_killed = state.can_be_killed(player_color)

            if db_access.compare_with_data_base_states(state, player):
                state_quality = 20000
            else:
                state_quality = 0

            if can_kill and not can_be_killed:
                state_quality += 15000
            elif not can_kill and not can_be_killed:
                state_quality += 10000
            else:  # can_be_killed, with or without a kill of our own
                state_quality -= 5000

            for row in state.get_board():
                for pos in row:
                    if pos is not None:
                        if pos.get_color() is player_color:
                            state_quality += Piece.VALUES[pos.get_type()]
                        else:
                            state_quality -= Piece.VALUES[pos.get_type()]
            return state_quality
Example #30
    # raw_data_list = app.load_all_data()  # extract output from all time
    end_extract = time.time()
    extract_time = round(end_extract - start, 4)
    print(f"Extract time: {extract_time}")
    logger.info(f"Extract time: {extract_time}")
    apple = Transform()
    transformed_data, new_drinks, new_locations, basket = apple.transform(
        raw_data_list
    )  # transform the raw data; returns the cleaned data plus drink, location, and basket data
    # transformed_data, basket = apple.transform(raw_data_list) # raw data into transform returns transformed data and drinks dic

    end_transform = time.time()
    transform_time = round(end_transform - end_extract, 4)
    logger.info(f"Transform time: {transform_time}")
    print(f"Transform time: {transform_time}")
    appley = Load()

    appley.save_transaction(
        transformed_data)  # populate RDS instance with cleaned data.
    appley.save_drink_menu(new_drinks)  # generate drinks menu
    appley.save_location_menu(new_locations)  # generate locations menu
    appley.save_basket(basket)  # generate the basket data

    end_load = time.time()
    load_time = round(end_load - end_transform, 4)
    logger.info(f"Loading time: {load_time}")
    total_time = extract_time + transform_time + load_time
    logger.info(f"total time: {total_time}")
    print(f"Load time: {load_time}\nTotal time: {total_time}")