Example #1
	def on_guardar_clicked(self, widget):
		#print(self.txtNB.get_text())
		config = Config()
		config.save(self.txtNB.get_text(),self.txtGF.get_text())
		delete = Deleter(self.txtNB.get_text().replace(" ","\ "),self.txtGF.get_text().replace(" ","\ "))
		delete.tumbaTodo()
		return
Example #2
def __init__(self):
    asyncore.dispatcher.__init__(self)
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    self.set_reuse_addr()
    self.bind((Config.get("WebServer", "listen_address"), Config.getint("WebServer", "port")))
    self.listen(5)  # maximum number of queued connections
    logging.info("Started WebServer on %s:%s" % (Config.get("WebServer", "listen_address"), Config.getint("WebServer", "port")))
Example #3
	def __init__(self, main_instance):
		AbstractRole.__init__(self, main_instance)
		self.dialog = Dialog(self)
		self.has_run = False
		self.shares = {}
		self.wm = None
		Config.read()
Example #4
    def __init__(self):
        logging.debug("Init incubator.")
        config = Config()
        day_tmp = config.get_day()

        q = Queue()
        self.q = q
        self.pwm_controller = PWMController(q)
        self.pwm_controller.start()

        self.roller = Roller(q, config.get_roll_intervall())
        self.ssr = SSRRegulator()
        # self.io_handler = IoHandler()

        self.ventilation = Ventilation(q)
        self.ventilation.set_point(40)
        self.ventilation.start()

        self.config = Config()
        self._running = True

        if day_tmp != 1:
            self.start_time = datetime.today() - timedelta(days=day_tmp - 1)
            self.roll_time = datetime.today()
        else:
            self.start_time = datetime.today()
            self.roll_time = datetime.today()
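
The day_tmp branch above backdates start_time so that a restored run resumes on the right day: for a saved day counter n, subtracting n - 1 days makes today count as day n again. A minimal standalone check of that arithmetic (day_tmp stands in for the value config.get_day() returns above):

from datetime import datetime, timedelta

day_tmp = 3  # e.g. the incubation day restored from config
start_time = datetime.today() - timedelta(days=day_tmp - 1)
current_day = (datetime.today() - start_time).days + 1
assert current_day == day_tmp  # resuming lands on the saved day again
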
Example #5
    def run(self, options):
        config = Config()
        config.keep_results = options['keep_results']
        config.create_diffs = options['create_diffs']
        config.update_refs = options['update_refs']

        t = Timer()
        docs = options['tests']
        docs_dir = options['docs_dir']

        if len(docs) == 1:
            if os.path.isdir(docs[0]):
                if docs_dir is None:
                    docs_dir = docs[0]
                if docs_dir == docs[0]:
                    docs = []
            else:
                if docs_dir is None:
                    docs_dir = os.path.dirname(docs[0])
        else:
            if docs_dir is None:
                docs_dir = os.path.commonprefix(docs).rpartition(os.path.sep)[0]

        tests = TestRun(docs_dir, options['refs_dir'], options['out_dir'])
        status = tests.run_tests(docs)
        tests.summary()
        get_printer().printout_ln("Tests run in %s" % (t.elapsed_str()))

        return status
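
One subtlety in the multi-document branch: os.path.commonprefix compares strings character by character, not path component by path component, which is why the result is trimmed back to the last separator with rpartition. For example (POSIX-style paths for illustration):

import os

docs = ["/data/tests-a/doc1.pdf", "/data/tests-b/doc2.pdf"]
prefix = os.path.commonprefix(docs)           # "/data/tests-", not a real directory
docs_dir = prefix.rpartition(os.path.sep)[0]  # "/data"
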
Example #6
def test_rfftStftConfig_01():
  with make_scope() as session:
    layer_name = "stft_layer"
    fft_size = 400
    frame_size = 400
    frame_shift = 160
    window = "hanning"
    test_input = np.ones((1, 32000, 2), dtype=np.float32)
    config = Config()
    config.update({
      "num_outputs": (int(fft_size / 2) + 1) * test_input.shape[2],
      "num_inputs": test_input.shape[2],
      "network": {
        layer_name: {
          "class": "multichannel_stft_layer", "frame_shift": frame_shift, "frame_size": frame_size,
          "window": window, "fft_size": fft_size, "use_rfft": True, "nr_of_channels": 2, "is_output_layer": True}
      }})
    network = TFNetwork(config=config, train_flag=True)
    network.construct_from_dict(config.typed_value("network"))
    layer = network.layers[layer_name]
    test_output = session.run(layer.output.placeholder, {network.get_extern_data('data').placeholder: test_input})
    ref0 = _get_ref_output(test_input, fft_size, frame_size, frame_shift, window, 0, 0)
    # np.fft.rfft and tensorflow.python.ops.rfft differ slightly in their
    # results, so an error margin is allowed in the comparison.
    resultDiff = np.abs(test_output[0, 0, 0:(int(fft_size / 2) + 1)] - ref0)
    assert np.mean(resultDiff) < 0.02
    assert np.max(resultDiff) < 1
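
The num_outputs formula encodes the fact that a real FFT of length fft_size yields fft_size // 2 + 1 frequency bins per channel. That bin count can be confirmed independently of the layer with plain NumPy:

import numpy as np

fft_size = 400
bins = np.fft.rfft(np.ones(fft_size)).shape[0]  # rfft keeps only the non-negative frequencies
assert bins == fft_size // 2 + 1  # 201 bins for a 400-point real FFT
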
Example #7
def test_complexLinearProjectionLayer():
  with make_scope() as session:
    n_in, n_out = 514, 128
    layer_name = "clp_layer"
    config = Config()
    config.update({
      "num_outputs": n_out,
      "num_inputs": n_in,
      "network": {
        layer_name: {
          "class": "complex_linear_projection", "nr_of_filters": n_out, "n_out": n_out, "is_output_layer": True}
      }})
    network = TFNetwork(config=config, train_flag=True)
    network.construct_from_dict(config.typed_value("network"))
    layer = network.layers[layer_name]
    assert isinstance(layer, ComplexLinearProjectionLayer)
    i_r = np.ones((1, n_in // 2))
    i_i = np.ones((1, n_in // 2)) * 0.5
    test_input = np.expand_dims(np.reshape(np.transpose(
      np.reshape(np.concatenate([i_r, i_i], axis=1), (1, 2, 257)), [0, 2, 1]), (1, 514)), 0)
    test_clp_kernel = np.ones((2, n_in // 2, 128))
    test_clp_output = session.run(
      layer.output.placeholder,
      feed_dict={network.get_extern_data('data').placeholder: test_input, layer._clp_kernel: test_clp_kernel})
    assert abs(test_clp_output[0, 0, 0] - 6.00722122) < 1e-5
Example #8
def run(argv):
    Options.stack.clear()

    queue = argv[1:]
    while len(queue) > 0:
        literal = queue.pop(0)
        option = get_option(literal)
        if option.__class__ in Options.stack:
            stderr.write("Option {} appears twice\n".format(option.name))
            exit(2)
        if option.requires_argument:
            if len(queue) == 0:
                stderr.write("Option {} requires an argument\n".format(option.name))
                exit(2)
            option.argument = queue.pop(0)
        Options.stack[option.__class__] = option

    if Options.Help in Options.stack:
        print_usage()

    for option in Options.stack.values():
        if option.check() is False:
            exit(2)

    config = Config(Options.Config.get_config_path())
    if not config.load():
        exit(1)
    UI(config).start()
Example #9
def main():
	init()
	config = Config()
	if not config.getConfig(sys.argv):
		sys.exit()
	
	dt = datetime.now()
	fslog = ScanWriter('log' + os.path.sep + dt.strftime('%Y%m%d %H%M%S') + '.log',config.log)
	
	tclient = TravianClient(config, fslog)
	if config.ReLogin:
		if not tclient.login():
			print 'Invalid username or password'
			sys.exit()
	else:
		#cookie check
		strHtml = tclient.getKarteZHtml(320801)
		if strHtml.find('login') > 0 and strHtml.find(u'用户名:') > 0 and strHtml.find(u'密码:') > 0:  # u'用户名' = "username", u'密码' = "password"
			print 'Cookie time out, relogin needed. Please use -l option or try --help option.'
			sys.exit()
	
	fsVillage = ScanWriter('result' + os.path.sep + dt.strftime('%Y%m%d %H%M%S') + 'Village.csv',config.Output[0])
	fsFarm = ScanWriter('result' + os.path.sep + dt.strftime('%Y%m%d %H%M%S') + 'Farm.csv',config.Output[1])
	fsOasis = ScanWriter('result' + os.path.sep + dt.strftime('%Y%m%d %H%M%S') + 'Oasis.csv',config.Output[2])
	fsVillage.write(u'村庄,玩家,居民,联盟,x,y\n')  # village, player, population, alliance, x, y
	fsFarm.write(u'伐木场,泥坑,铁矿场,农场,x,y\n')  # woodcutter, clay pit, iron mine, cropland, x, y
	fsOasis.write(u'老鼠,蜘蛛,野猪,蛇,蝙蝠,狼,熊,鳄鱼,老虎,大象,绿洲类型,x,y\n')  # rat, spider, boar, snake, bat, wolf, bear, crocodile, tiger, elephant, oasis type, x, y
	
	scaner = Scaner(config, tclient, [fsVillage,fsFarm,fsOasis])
	scaner.scan()
Example #10
    def execute(self, parameter, chatid):
        self.script_buffer=""
        if Config.debug:
            Config.debugPrint(self.name)
        self.__parameter = parameter
        awaitingOptions = {}
        for script in self.scripts:
            if Config.pause:
                Config.debugPause()
            if script.startswith('<<') or script.startswith('[['):
                self.__doScript(chatid, script)
                if self.__choices == 2:
                    if self.script_buffer!="":
                        question=self.script_buffer
                        self.script_buffer=""
                    else:
                        question="Auswahl:"
                    awaitingOptions = self.__makeChoice(question, chatid)
                    self.nextName = "awaiting answer"
                    break
                continue
            if self.__if[-1]:
                if self.script_buffer!="":
                    self.__delay(chatid)
                    sendmessage(self.script_buffer,chatid)
                    self.script_buffer=""
                self.script_buffer=script

            if self.__jumpNow:
                break
        if self.script_buffer!="":
            self.__delay(chatid)
            sendmessage(self.script_buffer,chatid)
        return self.nextName, self.__parameter, awaitingOptions
Example #11
    def __init__(self, getStatisticForBeast=None):
        """
        constructor --> initialising Variables
        @param getStatisticForBeast List: insert a tupel of chars ('a','b')
        """
        threading.Thread.__init__(self)
        self.server = None
        self.gui = None
        self.beastObjectMap = {}
        self.rankingList = []
        self.worldMap = None
        self.foodCounter = 0
        self.enableUrwidVisualisation = Config.__getUseUrwidVisualisation__()
        self.useNetworking = Config.__getUseNetworking__()
        self.startTimeMillis = time.time() + Config.__getStartInSeconds__()
        self.startTime = time.ctime(self.startTimeMillis)
        self.roundCounter = 0
        self.getStatisticForBeast = getStatisticForBeast
        self.useBeastAnalytics = True if(getStatisticForBeast) else False
        self.deadBeasts = 0
        self.urwidRoundDelay = Config.__getUrwidRoundDelay__() / 1000.0
        self.gameStarted = False
        self.gameFinished = False
        self.running = False
        self.log = logging.getLogger('beast-arena-logging')

        if self.enableUrwidVisualisation:
            self.useBeastAnalytics = True
            self.getStatisticForBeast = ''
        self.beastAnalytics = BeastAnalytics() if(self.useBeastAnalytics) else False
Example #12
def __init__(self):
    asynchat.async_chat.__init__(self)
    self.data = []
    self.logonce = True
    self.scheduler = (Config.get("Networking", "listen_address"), Config.getint("Networking", "port"))
    self.doConnect()
    self.set_terminator("#EOM#")
Example #13
def onInit(self):
    self.dataPath     = self.config.getDataDir()
    config            = Config(self.config.data(), self.dataPath)
    self.plots        = config.getPlots()
    self.graphs       = config.getGraphs()
    self.interval     = self.determineInterval()
    log.info('Service started with %s plots, %s graphs, interval %s' % (len(self.plots), len(self.graphs), self.interval))
Example #14
def stop(self):
    env = C.get('env')
    host = C.get('rds_host_' + env)
    db = C.get('rds_db_' + env)
    rds = Redis(host=host, port=6379, db=db)
    tradeChannel = C.get('channel_trade')
    rds.publish(tradeChannel, 'stop')
Example #15
def main():
    logging.getLogger().setLevel(logging.DEBUG)
    logging.info("Starting...")
    config = Config("/etc/pyvision/server.conf")
    config.load()
    dispatch.AsServer(port=8080)
    logging.info("Exiting...")
Example #16
def test_config1_basic():
  config = Config()
  config.update(config1_dict)
  desc = LayerNetworkDescription.from_config(config)
  assert_is_instance(desc.hidden_info, list)
  assert_equal(len(desc.hidden_info), len(config1_dict["hidden_size"]))
  assert_equal(desc.num_inputs, config1_dict["num_inputs"])
Example #17
class Interface:
    def __init__(self, busnum=-1, addr=0x20, debug=False):
        self.lcd = LCD(busnum, addr, debug)
        self.config = Config()
        self.lcd.backlight(int(self.config.get('Display', 'Colour')))
        self.lcd.clear()

    def scytheHome(self):
        self.display('<> Scythe L/R\n^v Choose image')

    def chooseImage(self, imageName):
        output = 'Image chosen:\n' + imageName
        self.display(output)

    def display(self, text):
        self.lcd.display()
        self.lcd.backlight(int(self.config.get('Display', 'Colour')))
        self.lcd.clear()
        self.lcd.message(text)

    def buttons(self):
        return self.lcd.buttons()

    def off(self):
        self.lcd.backlight(self.lcd.OFF)
        self.lcd.noDisplay()
Example #18
def test_Updater_add_check_numerics_ops():
  class _Layer(DummyLayer):
    def _get_loss_value(self):
      return tf.log(self.x)

  from TFNetwork import TFNetwork, ExternData
  from Config import Config

  with make_scope() as session:
    config = Config()
    config.set("debug_add_check_numerics_ops", True)
    network = TFNetwork(extern_data=ExternData(), train_flag=True)
    network.add_layer(name="output", layer_class=_Layer, initial_value=1.0)
    network.initialize_params(session=session)

    updater = Updater(config=config, network=network)
    updater.set_learning_rate(1.0, session=session)
    updater.set_trainable_vars(network.get_trainable_params())
    updater.init_optimizer_vars(session=session)
    # Should succeed.
    session.run(updater.get_optim_op())
    # One gradient descent step from ln(x), x = 1.0: gradient is 1.0 / x, thus x - 1.0 = 0.0.
    assert_almost_equal(session.run(network.get_default_output_layer().output.placeholder), 0.0)

    try:
      # Now, should fail.
      session.run(updater.get_optim_op())
    except tf.errors.InvalidArgumentError as exc:
      print("Expected exception: %r" % exc)
    else:
      assert False, "should have raised an exception"
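
For reference, the arithmetic behind the comment above: with loss ln(x), learning rate 1.0 and x = 1.0, one gradient descent step gives x' = x - 1.0 * d/dx ln(x) = 1.0 - 1.0 / 1.0 = 0.0. The second step then evaluates ln(0) and 1/0, and those non-finite values are exactly what debug_add_check_numerics_ops is expected to flag.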
Example #19
def create_first_epoch(config_filename):
  config = Config()
  config.load_file(config_filename)
  engine = Engine([])
  engine.init_train_from_config(config=config, train_data=None)
  engine.epoch = 1
  engine.save_model(engine.get_epoch_model_filename(), epoch=engine.epoch)
Example #20
        def normalizeall(self, input, output, rangeb, rangea):  # Currently disabled
        # CHECK the while loop: the queries use < and not <=
            config=Config()
            INCREMENT=config.getCronTime()
            start=rangeb
            end=rangea
            join = [[0 for i in range(7)] for j in range(((end-start)/300)-1)]
            c1=c2=x=0
            start = (start - start % INCREMENT) + INCREMENT  # CHANGE TO INCREMENT
            while start < end:
                if c1<len(input) and input[c1][3]==start :
                    join[x][0]=input[c1][0]
                    join[x][2]=input[c1][1]
                    join[x][4]=input[c1][2]
                    c1=c1+1
                else:
                    join[x][0]=0
                    join[x][2]=0
                    join[x][4]=0
                if c2<len(output) and output[c2][3]==start  :
                    join[x][1]=output[c2][0]
                    join[x][3]=output[c2][1]
                    join[x][5]=output[c2][2]
                    c2=c2+1
                else:
                    join[x][1]=0
                    join[x][3]=0
                    join[x][5]=0

                join[x][6]=start
                start=start+INCREMENT
                x=x+1

            t=tuple(tuple(x) for x in join)
            return t
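
The line start = (start - start % INCREMENT) + INCREMENT advances the start timestamp to the next INCREMENT boundary, strictly after the original value. With illustrative numbers:

INCREMENT = 300  # cron interval in seconds, per config.getCronTime() above
start = 1234
start = (start - start % INCREMENT) + INCREMENT
assert start == 1500  # 1234 floors to 1200, then advances one full interval
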
Example #21
def test_stftConfig_multi_res_02():
  with make_scope() as session:
    layer_name = "stft_layer"
    fft_sizes = [400, 200, 800]
    frame_sizes = [400, 200, 800]
    frame_shift = 160
    window = "hanning"
    test_input = np.random.normal(0, 0.6, (1, 3200, 2))
    num_outputs = int(np.sum([(int(fft_size / 2) + 1) * test_input.shape[2] for fft_size in fft_sizes]))
    config = Config()
    config.update({
      "num_outputs": num_outputs,
      "num_inputs": test_input.shape[2],
      "network": {
        layer_name: {
          "class": "multichannel_multiresolution_stft_layer", "frame_shift": frame_shift, "frame_sizes": frame_sizes,
          "window": window, "fft_sizes": fft_sizes, "use_rfft": True, "nr_of_channels": 2, "is_output_layer": True}
      }})
    network = TFNetwork(config=config, train_flag=True)
    network.construct_from_dict(config.typed_value("network"))
    layer = network.layers[layer_name]
    test_output = session.run(layer.output.placeholder, {network.get_extern_data('data').placeholder: test_input})
    assert test_output.shape[2] == num_outputs
    comparison_frame = 6
    ref00 = _get_ref_output_single_res(test_input, fft_sizes[0], frame_sizes[0], frame_shift, window, comparison_frame, 0)
    ref01 = _get_ref_output_single_res(test_input, fft_sizes[0], frame_sizes[0], frame_shift, window, comparison_frame, 1)
    ref10 = _get_ref_output_single_res(test_input, fft_sizes[1], frame_sizes[1], frame_shift, window, comparison_frame, 0)
    ref11 = _get_ref_output_single_res(test_input, fft_sizes[1], frame_sizes[1], frame_shift, window, comparison_frame, 1)
    ref20 = _get_ref_output_single_res(test_input, fft_sizes[2], frame_sizes[2], frame_shift, window, comparison_frame, 0)
    ref21 = _get_ref_output_single_res(test_input, fft_sizes[2], frame_sizes[2], frame_shift, window, comparison_frame, 1)
    ref = np.concatenate([ref00, ref01, ref10, ref11, ref20, ref21], axis=0)
    resultDiff = np.abs(test_output[0, comparison_frame, :] - ref)
    assert np.mean(resultDiff) < 0.06
    assert np.max(resultDiff) < 1
Example #22
    def getSensor(self, sensorID):
        """
        Sends a sensor info request to the Live! API with the wanted sensor's index.
        Returns the sensor data received.
        """

        from Config import Config
        config = Config("configuration.json")
        sensorinfo = config.getItem("sensorinfo", "")

        for sensor in sensorinfo:
            if sensor["id"] == sensorID:
                return sensor

        # Debug code, uses local database
        if sensorID == "174963":
            response = {u'ignored': 0, u'protocol': u'oregon', u'name': u'Kasvari - l\xe4mmin puoli', u'editable': 1, u'lastUpdated': 1365097299, u'timezoneoffset': 10800, u'sensorId': u'139', u'data': [{u'name': u'temp', u'value': u'2.3'}, {u'name': u'humidity', u'value': u'87'}], u'id': u'174963', u'clientName': u'Koti'}
            return response
        elif sensorID == "895892":
            response = {u'ignored': 0, u'protocol': u'fineoffset', u'name': u'Kellari', u'editable': 1, u'lastUpdated': 1367497808, u'timezoneoffset': 10800, u'sensorId': u'73', u'data': [{u'name': u'temp', u'value': u'13.1'}], u'id': u'895892', u'clientName': u'Kasvari'}
            return response
        else:
            return {"error": "Sensor doesn't exist"}

        # Unreachable while the debug block above is active: the real API request.
        return self.request("sensor/info", {"id": sensorID})
Example #23
def test_NetworkDescription_to_json_config1():
  config = Config()
  config.update(config1_dict)
  desc = LayerNetworkDescription.from_config(config)
  desc_json_content = desc.to_json_content()
  pprint(desc_json_content)
  assert_in("hidden_0", desc_json_content)
  assert_equal(desc_json_content["hidden_0"]["class"], "forward")
  assert_in("hidden_1", desc_json_content)
  assert_in("output", desc_json_content)
  orig_network = LayerNetwork.from_description(desc)
  assert_in("hidden_0", orig_network.hidden)
  assert_in("hidden_1", orig_network.hidden)
  assert_equal(len(orig_network.hidden), 2)
  assert_is_instance(orig_network.hidden["hidden_0"], ForwardLayer)
  assert_equal(orig_network.hidden["hidden_0"].layer_class, "hidden")
  orig_json_content = orig_network.to_json_content()
  pprint(orig_json_content)
  assert_in("hidden_0", orig_json_content)
  assert_equal(orig_json_content["hidden_0"]["class"], "hidden")
  assert_in("hidden_1", orig_json_content)
  assert_in("output", orig_json_content)
  new_network = LayerNetwork.from_json(
    desc_json_content,
    config1_dict["num_inputs"],
    {"classes": (config1_dict["num_outputs"], 1)})
  new_json_content = new_network.to_json_content()
  if orig_json_content != new_json_content:
    print(dict_diff_str(orig_json_content, new_json_content))
    assert_equal(orig_json_content, new_network.to_json_content())
Example #24
def setup_backtest():
    """This function gets all the information from the command line and the ini file."""

    from optparse import OptionParser
    p = OptionParser(usage='usage: %prog [options] [START_DATE] [END_DATE]')
    p.add_option('-c', '--config', dest='config', metavar='CONFIGFILE', help='Specifies input config file')
    p.add_option('-s', '--silent', dest='silent', action='store_true', default=False, help="Specifies if logging to console should be disabled.")
    opts,args = p.parse_args()
    
    if opts.config is None:
        print >>sys.stderr, "No config file specified. Use --help"
        sys.exit(1)
    
    config = Config(opts, args)
    
    start_date = config.get_value('PORTFOLIO', 'startdate')
    end_date = config.get_value('PORTFOLIO', 'enddate')
    log_file_name = ".".join([str(start_date), str(end_date), log_file_ext])
    
    global stat_file_name
    stat_file_name = ".".join([str(start_date), str(end_date), stat_file_ext])
    
    logging.basicConfig(filename=log_file_name, filemode='w') ## file will be overwritten each time
    logger = logging.getLogger('backtester')
    logger.setLevel(logging.INFO)
    
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.WARNING if config.get_value('BACKTEST', 'silent') else logging.INFO)
    logger.addHandler(console_handler)
    
    return config, logger
Example #25
def test_stftConfig_single_res_01():
  with make_scope() as session:
    layer_name = "stft_layer"
    fft_sizes = [400]
    frame_sizes = [400]
    frame_shift = 160
    window = "hanning"
    test_input = np.ones((1, 32000, 2), dtype=np.float32)
    num_outputs = (int(fft_sizes[0] / 2) + 1) * test_input.shape[2]
    config = Config()
    config.update({
      "num_outputs": num_outputs,
      "num_inputs": test_input.shape[2],
      "network": {
        layer_name: {
          "class": "multichannel_multiresolution_stft_layer", "frame_shift": frame_shift, "frame_sizes": frame_sizes,
          "window": window, "fft_sizes": fft_sizes, "use_rfft": True, "nr_of_channels": 2, "is_output_layer": True}
      }})
    network = TFNetwork(config=config, train_flag=True)
    network.construct_from_dict(config.typed_value("network"))
    layer = network.layers[layer_name]
    test_output = session.run(layer.output.placeholder, {network.get_extern_data('data').placeholder: test_input})
    ref0 = _get_ref_output_single_res(test_input, fft_sizes[0], frame_sizes[0], frame_shift, window, 0, 0)
    resultDiff = np.abs(test_output[0, 0, 0:(int(fft_sizes[0] / 2) + 1)] - ref0)
    assert test_output.shape[2] == num_outputs
    assert np.mean(resultDiff) < 0.02
    assert np.max(resultDiff) < 1
Example #26
def test_enc_dec1_init():
  config = Config()
  config.load_file(StringIO(config_enc_dec1_json))

  network_json = LayerNetwork.json_from_config(config)
  assert_true(network_json)
  network = LayerNetwork.from_json_and_config(network_json, config)
  assert_true(network)
Example #27
def get_config(pytest_config):
    global _config, _config_file_name
    if _config is None:
        _config_file_name = pytest_config.getoption('config')
        print("\nconfig file is {}".format(_config_file_name))
        _config = Config(_config_file_name)
        _config.read_config()
    return _config
Example #28
class OsmWrapper():
    def __init__(self, url=None, api=None):
        self.cfg = Config()
        if url:
            self.url = url
        else:
            try:
                self.url = self.cfg.get("DEFAULT", "api")
            except NoOptionError:
                self.url = "api.openstreetmap.org"

        if not api:
            self.api = OsmApi(api=self.url,
                              username=self.cfg.get("Authentication", "username"),
                              password=b64decode(self.cfg.get("Authentication", "password")),
                              appid="Piote/%s" % Piote.version)
        else:
            self.api = api

    def Get(self, obj, id):
        if obj == "node":
            f = self.api.NodeGet
        elif obj == "way":
            f = self.api.WayGet
        elif obj == "relation":
            f = self.api.RelationGet
        Piote.data = f(id)
        self.data = Piote.data
        return Piote.data["tag"]

    def Put(self, msg, obj, model, data=None):
        changeset = self.api.ChangesetCreate({u"comment": unicode(msg, "utf-8")})
        tags = defaultdict(str)

        for row in model:
            tags[unicode(row[0], "utf-8")] = unicode(row[1], "utf-8")

        if data:
            data["tag"] = tags
        else:
            Piote.data["tag"] = tags

        if obj == "node":
            f = self.api.NodeUpdate
        elif obj == "way":
            f = self.api.WayUpdate
        elif obj == "relation":
            f = self.api.RelationUpdate

        if data:
            result = f(data)
            data["version"] = result["version"]
        else:
            result = f(Piote.data)
            Piote.data["version"] = result["version"]

        #self.api.ChangesetUpload([{"type":obj, "action":"modify", "data":self.data}])
        self.api.ChangesetClose()
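
The constructor expects the stored password to be Base64-encoded, since it is passed through b64decode before reaching OsmApi. A round-trip sketch of what the config value would look like (illustrative password):

from base64 import b64encode, b64decode

stored = b64encode(b"hunter2")  # the value as it would sit in the config file
assert b64decode(stored) == b"hunter2"
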
Example #29
def log(msg="", log_type=LOG_SYSTEM, log_level=ERROR, log_file=""):
    config = Config()
    if config.get("log_record") == "False":
        return False
    if msg == "":
        #捕获异常信息
        msg = traceback.format_exc().splitlines()
    #写日志
    l.write(msg, log_file, log_type, log_level)
Example #30
def initConfig(self):
    cfg = Config()
    if not hasattr(cfg, "device"):
        cfg.device = u""
    if not hasattr(cfg, "datFile"):
        cfg.datFile = u""
    if not hasattr(cfg, "simulateEmulator"):
        cfg.simulateEmulator = False
    self.config = cfg
Example #31
                    raise ValueError('Query result and fill parameters do not match; cannot convert!')
                for i in range(len(result)):
                    fill_field_value = result[i]
                    if isinstance(
                            result[i],
                            decimal.Decimal) and decimal_type_convert_to == 1:

                        fill_field_value = float(result[i])
                    if isinstance(
                            result[i],
                            decimal.Decimal) and decimal_type_convert_to == 2:

                        fill_field_value = int(result[i])
                    if isinstance(result[i],
                                  datetime.datetime):  # datetime values from the DB need converting to str

                        fill_field_value = str(result[i])

                    json_dic.update({fill_field_list[i]: fill_field_value})
                result_json_list.append(json_dic)
        else:
            raise TypeError('Query result and fill parameters must be lists!')

        return result_json_list


if __name__ == '__main__':
    sql = '''select a.FID,a.FAccount,a.FCompanyID from TAccounts as a where a.FAccount='ttt11' and a.FCompanyID in (select a.FCompanyID from TAccounts as a where a.FAccount='csdqt')'''
    data = DB_Util(Config().db_config_name).excute_sql(sql)
    data = DB_Util.sql_result_to_json(data, ['id', 'name', 'cid'])
    print(data)
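
The two decimal.Decimal branches above map database decimals either to float or, truncating, to int, depending on decimal_type_convert_to. In isolation:

import decimal

d = decimal.Decimal("12.75")
assert float(d) == 12.75
assert int(d) == 12  # int() truncates toward zero
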
Example #32
# encoding: utf-8
from __future__ import unicode_literals, print_function
import datetime, sys, os
from workflow import Workflow, ICON_WEB, web
from Config import Config

log = None
if __name__ == u"__main__":
    wf = Workflow()
    log = wf.logger

    log.debug("script started")
    c = Config()

    #routes = c.get_route()

    # Get route name as selected from the user
    route_name = wf.decode(os.getenv('env_route_name'))

    log.debug("Got route name %s" % (route_name))

    #Route name is now e.g. Kåk

    #utf8_route_name = unicode(route_name, 'utf-8')

    #log.debug("Created the utf8 route name %s" % (utf8_route_name))

    #log.debug(route_name)

    #log.debug(route_name.encode("utf-8"))
Example #33
from Config import Config, ChromosomeConfig, FunctionParameters
from math import pi
from XmlFile import XmlFileReader
import os

global config
global window

if(os.path.exists('config.xml')):
    reader = XmlFileReader('config.xml')
    config = reader.getConfig()
else:
    fc = FunctionParameters(20, 0.2, 2*pi)
    cc = ChromosomeConfig()
    config = Config(100, cc)
window = 1
Example #34
def test_RecLayer_NativeLstm_Nan():
    print("test_RecLayer_NativeLstm_Nan()")
    print("GPU available:", is_gpu_available())
    numpy.set_printoptions(precision=15)
    num_inputs = 4
    num_outputs = 3

    config = Config()
    config.update({
        "num_inputs": num_inputs,
        "num_outputs": {
            "data": [num_inputs, 2],
            "classes": [num_outputs, 2]
        },  # dense output
        "network": {
            "output": {
                "class": "rec",
                "unit": "NativeLSTM",
                "loss": "mse"
            }
        },
        "adam": True,
        "debug_grad_summaries": True,
        "debug_save_updater_vars": True,
        "debug_add_check_numerics_ops": True,
    })

    print("Reset default graph...")
    tf.reset_default_graph()
    print("Create network...")
    network = TFNetwork(config=config, train_flag=True)
    network.construct_from_dict(config.typed_dict["network"])

    # Depending on the seed, I get nan earlier, later, or not at all.
    # limit=5.0: seed=3 -> nan in step 4094. seed=1 -> nan in step 2463.
    random = numpy.random.RandomState(seed=1)
    limit = 10.0  # The higher, the more likely you get nan.

    def make_feed_dict(seq_len=10):
        return {
            network.extern_data.data["data"].placeholder:
            random.uniform(-limit, limit, (1, seq_len, num_inputs)),
            network.extern_data.data["data"].size_placeholder[0]:
            numpy.array([seq_len]),
            network.extern_data.data["classes"].placeholder:
            random.uniform(-limit, limit, (1, seq_len, num_outputs)),
            network.extern_data.data["classes"].size_placeholder[0]:
            numpy.array([seq_len]),
        }

    print("Creating session...")
    with tf.Session() as session:
        print("Init params...")
        network.initialize_params(session=session)
        print("Test run...")
        output_data1 = session.run(
            network.get_default_output_layer().output.placeholder,
            feed_dict=make_feed_dict(5))
        assert_equal(output_data1.shape,
                     (5, 1, num_outputs))  # (time, batch, dim)

        layer = network.layers["output"]
        loss_t = network.get_total_loss() * layer.get_loss_normalization_factor()
        weights_t = layer.params["W"]
        weights_grad_t, = tf.gradients(network.get_objective(), weights_t)

        def find_op_by_type(type_name):
            for op in session.graph.get_operations():
                assert isinstance(op, tf.Operation)
                if op.type == type_name:
                    return op

        lstm_grad_op = find_op_by_type("GradOfLstmGenericBase")
        assert lstm_grad_op is not None
        lstm_grad_ins_t = list(lstm_grad_op.inputs)
        lstm_grad_outs_t = list(lstm_grad_op.outputs)
        lstm_grad_func = _lstm_grad_op(session=session)
        demo_grad_t = lstm_grad_func(*_demo_lstm_grad_args())
        demo_grad2_input_placeholders = [
            tf.placeholder(v.dtype) for v in lstm_grad_ins_t
        ]
        demo_grad2_t = lstm_grad_func(*demo_grad2_input_placeholders)[1]

        print("Create updater...")
        from TFUpdater import Updater
        updater = Updater(config=config, network=network, tf_session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.set_learning_rate(0.1)
        optim_op = updater.get_optim_op()
        assert isinstance(updater.optimizer, tf.train.AdamOptimizer)
        adam_weights_m_t = updater.optimizer.get_slot(var=weights_t, name="m")
        adam_weights_v_t = updater.optimizer.get_slot(var=weights_t, name="v")
        assert isinstance(adam_weights_m_t, tf.Variable)
        assert isinstance(adam_weights_v_t, tf.Variable)
        summaries_t = tf.summary.merge_all()

        # https://github.com/tensorflow/tensorflow/blob/03beb65cecbc1e49ea477bca7f54543134b31d53/tensorflow/core/kernels/training_ops_gpu.cu.cc
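        # Adam's step direction is m_t / (sqrt(v_t) + eps); TensorFlow folds the
        # bias-correction factors into the effective learning rate (see the kernel
        # linked above), so the ratio below reconstructs it from the raw slot variables.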
        adam_update_t = adam_weights_m_t / (tf.sqrt(adam_weights_v_t) + 1e-8)

        import tempfile
        tmp_tf_logdir = tempfile.mkdtemp("tmp-tf-log")
        print("Write TF logs to:", tmp_tf_logdir)
        writer = tf.summary.FileWriter(tmp_tf_logdir)
        writer.add_graph(session.graph)

        print("Training...")
        recent_info = []  # type: list[dict[str]]
        for i in range(10000):
            feed_dict = make_feed_dict(5)
            weights_grad, lstm_grad_ins, lstm_grad_outs = session.run(
                [weights_grad_t, lstm_grad_ins_t, lstm_grad_outs_t],
                feed_dict=feed_dict)
            try:
                if not numpy.all(numpy.isfinite(weights_grad)):
                    raise Exception("weights_grad has inf or nan.")
                loss, _opt, summaries, weights, adam_update = session.run(
                    [loss_t, optim_op, summaries_t, weights_t, adam_update_t],
                    feed_dict=feed_dict)
            except Exception as exc:
                print("Exception in step %i." % i)
                print(exc)
                print("Most recent summaries:")
                summary_proto = tf.Summary()
                summary_proto.ParseFromString(recent_info[-1]["summaries"])
                for val in summary_proto.value:
                    # Assuming all summaries are scalars.
                    print("  %s: %r" % (val.tag, val.simple_value))
                print("Most recent weights:")
                print(recent_info[-1]["weights"])
                print("Current weights:")
                print(session.run(weights_t))
                print("Most recent Adam update:")
                print(recent_info[-1]["adam_update"])
                print("Current Adam update:")
                print(session.run(adam_update_t))
                print("Used weights grad:")
                print(weights_grad)
                print("GradOfLstmGenericBase inputs:")
                for t, v in zip(lstm_grad_ins_t, lstm_grad_ins):
                    print("%r:" % t)
                    print(repr(v))
                print("GradOfLstmGenericBase outputs:")
                for t, v in zip(lstm_grad_outs_t, lstm_grad_outs):
                    print("%r:" % t)
                    print(repr(v))
                print("Demo grad:")
                print(session.run(demo_grad_t))
                print("Demo grad2:")
                print(
                    session.run(
                        demo_grad2_t,
                        feed_dict={
                            k: v
                            for (k, v) in zip(demo_grad2_input_placeholders,
                                              lstm_grad_ins)
                        }))
                print("Demo grad2 via eval:")
                print(
                    session.run(
                        demo_grad2_t,
                        feed_dict={
                            k: eval(repr(v), vars(numpy))
                            for (k, v) in zip(demo_grad2_input_placeholders,
                                              lstm_grad_ins)
                        }))
                print("Demo grad2 via args:")
                print(
                    session.run(
                        demo_grad2_t,
                        feed_dict={
                            k: v
                            for (k, v) in zip(demo_grad2_input_placeholders,
                                              _demo_lstm_grad_args())
                        }))
                raise Exception("Exception in step %i." % i)
            writer.add_summary(summaries, global_step=i)
            if len(recent_info) > 1000:
                recent_info.pop(0)
            recent_info.append({
                "step": i,
                "loss": loss,
                "summaries": summaries,
                "weights": weights,
                "adam_update": adam_update
            })
            if not numpy.isfinite(loss) or i % 100 == 0:
                print("step %i, loss: %r" % (i, loss))
            assert numpy.isfinite(loss)

    print("Done.")
    import shutil
    shutil.rmtree(tmp_tf_logdir)
Example #35
def btc_analyzer_cli(start_date, last_date):
    """
    Blockchain analyzer is a tool for parsing Bitcoin blocks and tracking a specific transaction.
    """
    conf = Config()
    # root folder path.
    root_folder = path.dirname(path.dirname(__file__))
    # path to log file
    log_path = "{}/log".format(root_folder)
    # create log folder if not exists.
    if not path.exists(log_path):
        makedirs(log_path)

    # configure the logger
    conf_logger(log_path=log_path, file_name="blockchain_blocks_analyzer")
    logger = logging.getLogger("blockchain_blocks_analyzer")

    if not (start_date and last_date):
        click.echo("start date and last date must be specified.")
        return

    logger.info("starting btc-analyzer.")

    # swap dates if start date is bigger.
    if start_date > last_date:
        start_date, last_date = last_date, start_date

    # get all date range in a list
    logger.info("calculating dates.")
    num_days = last_date - start_date
    dates = [
        start_date + timedelta(days=d) for d in tqdm(range(0, num_days.days))
    ]

    # request all the blocks between the range date.
    logger.info("fetching all blocks hash by given date.")
    blocks_hash = []
    for date in tqdm(dates):
        try:
            blocks = get_blocks_by_date(time_mils=to_millis(date))
        except json.decoder.JSONDecodeError:
            logger.error('json decode for date: {} failed.'.format(date))
            continue

        blocks_hash.extend(list(map(lambda b: b['hash'], blocks['blocks'])))

    # parse transaction.
    logger.info("fetching all transaction by given block.")
    trxs = []
    for block_hash in blocks_hash:
        try:
            trxs.extend(generate_docs(get_single_block(block_hash=block_hash)))
        except json.decoder.JSONDecodeError:
            logger.error('json decode for hash: {} failed.'.format(block_hash))
            continue

        if len(trxs) >= 50:
            # upload the data gathered so far to Elasticsearch
            logger.info("uploading transactions so far to elasticsearch.")
            add_index(es_url=conf.elasticsearch_url,
                      body=trxs,
                      index_name='btc_transactions',
                      doc_type='btc_trx')
            trxs.clear()

    # upload the remaining data to Elasticsearch
    logger.info("uploading all transactions to elasticsearch.")
    add_index(es_url=conf.elasticsearch_url,
              body=trxs,
              index_name='btc_transactions',
              doc_type='btc_trx')

    # track algorithm, after specific transaction.
    pass
Example #36
import os
from selenium import webdriver

from Config import Config

#from selenium.webdriver.support.ui import WebDriverWait
import time

#PATHCHROMEDRIVER = "/usr/local/bin/chromedriver"
#URL = 'https://phptravels.com/demo/'
#URL = 'http://192.168.11.186/Migracion-Web/admin/login'

#**********************************************************************
#Config:
config = Config()
URL = config.getUrl()
PATHCHROMEDRIVER = config.getPathChromeDriver()
print(URL)
print(PATHCHROMEDRIVER)

chromedriver = PATHCHROMEDRIVER
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)

driver.get(URL)

#**********************************************************************
#Admin Login:

# Enter the login form data (username, password)
driver.find_element_by_id("username").send_keys("zweicom")
Example #37
def __init__(self, machine_status: MachineStatus,
             pulse_transmitter: PulseTransmitter):
    super().__init__(machine_status, pulse_transmitter)
    self.input_pin = Config.get_config_variable('extruder_input_pin')
    self.pulse_transmitter.register_receiver(self.transmit_pulse)
Example #38
from contributors import getContributorList
from logging.handlers import RotatingFileHandler

from Config import Config
from Constants import *
from flask import Flask
from flask import request
import json
import logging
import os
import requests

app = Flask(__name__)

config = Config('app.properties')
username = config.getProperty('DEFAULT', 'username')
password = config.getProperty('DEFAULT', 'password')
message = config.getProperty('comments', 'message')
newUsersOnly = config.getBoolean('DEFAULT', 'new-users')
postPREndPoint = config.getProperty('DEFAULT', 'end-point')


@app.route(postPREndPoint, methods=["POST"])
def postPRCommentToNewContributor():

    pullRequestEvent = json.loads(request.data)

    # Only comment on PRs that have just been opened
    if (pullRequestEvent.has_key(ACTION)
            and pullRequestEvent[ACTION] == OPENED):
        pullRequest = pullRequestEvent[PULL_REQUEST]
Example #39
def __init__(self, dry=False, debug=False):
    self.phase = STEP_END
    self.mc = MultiCommand(dry=dry, debug=debug)
    self.branches = Config().getBranches()
Example #40
def info():
    logger = Log('info')
    config = Config()

    def dump_external_info(title, args):
        logger.info('')
        logger.info(f'*****{title}******')
        p = subprocess.Popen(args, stdout=subprocess.PIPE)
        out, err = p.communicate()
        lines = filter(lambda x: x.strip(), out.decode("utf-8").splitlines())
        for l in lines:
            logger.info(l)

    def dump_class(title, clazz):
        logger.info('')
        logger.info(f'*****{title}******')
        for attr in clazz.__dict__:
            if attr.startswith('_'):
                logger.info('%s = %r' % (attr[1:], getattr(config, attr)))

    def dump_dict(title, dic):
        logger.info('')
        logger.info(f'*****{title}******')
        for x, y in dic.items():
            logger.info('%s = %r' % (x, y))

    logger.info('*****COMPONENT******')
    name = os.environ.get('C_NAME')
    version = os.environ.get('C_VERSION')
    logger.info(f'Name: {name}')
    logger.info(f'Version: {version}')

    dump_class('CONFIG', config)

    dump_dict(
        'SYSTEM', {
            'platform': platform.system(),
            'release': platform.release(),
            'type': platform.uname().system,
            'arch': platform.uname().machine,
            'cpus': json.dumps(os.cpu_count()),
            'hostname': socket.gethostname()
        })

    dump_dict('ENVIRONMENT VARS', dict(os.environ))

    dump_dict(
        'PYTHON', {
            'python_version': platform.python_version(),
            'python_build': platform.python_build(),
            'python_revision': platform.python_revision(),
            'python_compiler': platform.python_compiler(),
            'python_branch': platform.python_branch(),
            'python_implementation': platform.python_implementation()
        })

    dump_external_info('TERRAFORM VERSION', ['terraform', '--version'])
    dump_external_info('AZURECLI VERSION', ['az', '--version'])
    dump_external_info('ANSIBLE VERSION', ['ansible', '--version'])
    dump_external_info('ANSIBLE CONFIG', ['ansible-config', 'dump'])
    dump_external_info('ANSIBLE-VAULT VERSION', ['ansible-vault', '--version'])

    return 0
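
dump_external_info above captures a command's stdout via subprocess.Popen/communicate; on Python 3.5+ the same capture can be written with subprocess.run. A sketch of the equivalent (using print in place of the component logger):

import subprocess

def dump_external_info(title, args):
    out = subprocess.run(args, stdout=subprocess.PIPE).stdout
    print(f'*****{title}******')
    for line in out.decode("utf-8").splitlines():
        if line.strip():
            print(line)
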
Example #41
import re
import os
import types
import csv
import time
import pickle
import numpy as np
import pandas as pd
import random
from Config import Config
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

np.random.seed(1337)
config = Config()


def getEmbedding(emb_file, char2id):
    emb_dic = {}
    with open(emb_file, 'rb') as f:
        for line in f.readlines():
            line = line.rstrip().decode('utf-8')
            line_list = line.split('\t')
            key = line_list[0]
            line_list.pop(0)
            for i in xrange(len(line_list)):
                line_list[i] = float(line_list[i])
            emb_dic[key] = line_list
    embedding_matrix = np.zeros(
        (len(char2id.keys()), config.model_para['input_dim']))
Example #42
def __init__(self):
    self.config = Config.Config()
    self.log = Log.MyLog()
Example #43
            while not exists("project_ui_elements.png"):
                wait(1)

            if exists("add_first_person_dialog.png"):
                click(find(Pattern("cancel_button.png").similar(0.67)))
                wait(2)
            
            app.switchingProject()
            
            wait(2)
            if exists("project_ui_elements.png"):
                print "Switching between projects was successful"
            else: 
                print "Something went wrong...Switching project was failed"
            
            app.closeApp()
            base_cases.BaseMyHeritageTestCase.cleanUserData()

Config.init()
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(SwitchingProjectTestCase))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(sikuli1.CreatingProjectTest))

outfile = open(Config.get_reports_path() + "/%s.html" % (project_name), "w")
runner = HTMLTestRunner.HTMLTestRunner(stream=outfile, title=' Report Title', description='desc..' )
runner.run(suite)
outfile.close()


Example #44
def setUp(self):
    self.conf = Config.get_instance()
Example #45
	def do_lpwd(self, line):
		print self.fs.pwd()

	def do_lls(self, line):
		if len(line.strip()) >  0:
			res = self.fs.ls_native2(line.split(" "))
		else:
			res = self.fs.ls_native2()
		for l in res:
			print l

if __name__ == "__main__":
	rundir = os.path.dirname(sys.argv[0])
	basedir = os.path.abspath(rundir) +  "/"
		
	config = Config.conf_to_dict(basedir + "/config.txt")
	config["BASEDIR"] = basedir
	# replace relative paths
	for key in config:
		if key.startswith("PATH_"):
			config[key] = os.path.abspath(config["BASEDIR"] + config[key])

	try:
		dev_file_in_path = config["HID_RAW_DEV"]
		dev_file_out_path = config["HID_RAW_DEV"]

		
		HIDin_file = open(dev_file_in_path, "rb")
		HIDout_file = open(dev_file_out_path, "wb")

		# the linklayer starts several communication threads
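
The PATH_ loop above resolves config-relative paths against BASEDIR with os.path.abspath, which also normalises ".." segments. A minimal check with illustrative values (POSIX paths):

import os

config = {"BASEDIR": "/opt/tool/", "PATH_LOG": "../logs"}
for key in config:
    if key.startswith("PATH_"):
        config[key] = os.path.abspath(config["BASEDIR"] + config[key])
assert config["PATH_LOG"] == "/opt/logs"
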
Example #46
class Indexer:
    """
    Class to create, store and load an inverted index
    """
    def __init__(self, new_config):
        """
        Namespace config: Arguments passed on the command line
        """
        self.root_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)))
        stored_config = self.get_config(new_config)
        stored_config.update(vars(new_config))
        self.config = Config(**stored_config)

    def get_config(self, params):
        """
        Returns the configuration from the disk (if it exists), otherwise an empty dict
        Namespace params: Configuration params from command line
        """
        config = {}
        try:
            with open(
                    self.root_dir + '/' + params.index_dir + '/' +
                    params.config_file_name, 'r') as f:
                # Load config from disk
                config = json.load(f)
        except Exception:
            # Return empty dictionary
            config = {}

        return config

    def load_data(self):
        """
        Loads a data file from the disk for any extension, currently only .json is supported
        """
        data_file = self.root_dir + '/' + self.config.data_dir + \
            '/' + self.config.data_file_name
        with open(data_file, 'r') as f:
            root, ext = os.path.splitext(data_file)
            # Check if file extension is .json
            if ext == '.json':
                # Load json data from file and return
                return json.load(f)

    def create_inverted_index(self, compressed):
        """
        Creates and returns an inverted index
        bool compressed: Flag to choose between a compressed / uncompressed index
        """
        inverted_index = InvertedIndex(self.config, compressed)
        data = self.load_data()
        doc_id = -1
        for scene in data['corpus']:
            doc_id += 1
            scene_text = scene['text']
            # Filter None removes empty strings from the list after the split on space
            terms = list(filter(None, scene_text.split()))
            doc_meta = {
                'playId': scene['playId'],
                'sceneId': scene['sceneId'],
                'sceneNum': scene['sceneNum'],
                'sceneLength': len(terms)
            }
            inverted_index.update_docs_meta(doc_id, doc_meta)
            inverted_index.update_collection_stats(
                doc_length=doc_meta['sceneLength'])
            for position, term in enumerate(terms):
                inverted_index.update_map(term, doc_id, position)
        inverted_index.update_collection_stats(average_length=True)
        inverted_index.load_vocabulary()
        return inverted_index

    def get_inverted_index(self, compressed):
        """
        Loads an inverted index from file or calls the create method if it doesn't exist
        bool compressed: Flag to choose between a compressed / uncompressed index
        """
        inverted_index = None
        try:
            with open(
                    self.root_dir + '/' + self.config.index_dir + '/' +
                    self.config.collection_stats_file_name,
                    'rb') as collection_stats_file:
                with open(
                        self.root_dir + '/' + self.config.index_dir + '/' +
                        self.config.docs_meta_file_name,
                        'rb') as docs_meta_file:
                    if not compressed:
                        with open(
                                self.root_dir + '/' + self.config.index_dir +
                                '/' + self.config.uncompressed_dir + '/' +
                                self.config.lookup_table_file_name,
                                'r') as lookup_table_file:
                            with open(
                                    self.root_dir + '/' +
                                    self.config.index_dir + '/' +
                                    self.config.uncompressed_dir + '/' +
                                    self.config.inverted_lists_file_name,
                                    'rb') as inverted_lists_file:
                                # Load lookup table, docs meta info and inverted lists (if in_memory is True) from the uncompressed version on disk
                                inverted_index = self.load_inverted_index_in_memory(
                                    collection_stats_file, docs_meta_file,
                                    lookup_table_file, inverted_lists_file,
                                    False)
                    if compressed:
                        with open(
                                self.root_dir + '/' + self.config.index_dir +
                                '/' + self.config.compressed_dir + '/' +
                                self.config.lookup_table_file_name,
                                'r') as lookup_table_file:
                            with open(
                                    self.root_dir + '/' +
                                    self.config.index_dir + '/' +
                                    self.config.compressed_dir + '/' +
                                    self.config.inverted_lists_file_name,
                                    'rb') as inverted_lists_file:
                                # Load lookup table, docs meta info and inverted lists (if in_memory is True) from the compressed version on disk
                                inverted_index = self.load_inverted_index_in_memory(
                                    collection_stats_file, docs_meta_file,
                                    lookup_table_file, inverted_lists_file,
                                    True)
        except Exception as e:
            # Create inverted index
            inverted_index = self.create_inverted_index(compressed)
            self.dump_inverted_index_to_disk(inverted_index)
            if not self.config.in_memory:
                self.remove_inverted_index_from_memory(inverted_index)

        return inverted_index

    def load_inverted_index_in_memory(self, collection_stats_file,
                                      docs_meta_file, lookup_table_file,
                                      inverted_lists_file, compressed):
        """
        Loads an inverted index in memory, inverted lists are not loaded by default
        buffer collection_stats_file: Buffer for the collection stats file
        buffer docs_meta_file: Buffer for the docs meta file
        buffer lookup_table_file: Buffer for the lookup table file
        buffer inverted_lists_file: Buffer for the inverted lists file
        """
        inverted_index = InvertedIndex(self.config, compressed)

        # Load collection statistics
        collection_stats = json.load(collection_stats_file)
        inverted_index.load_collection_stats(collection_stats)

        # Load meta info for documents
        docs_meta = json.load(docs_meta_file)
        inverted_index.load_docs_meta(docs_meta)

        # Load lookup table
        lookup_table = json.load(lookup_table_file)
        inverted_index.load_lookup_table(lookup_table)

        # Load vocabulary
        inverted_index.load_vocabulary()

        # Load inverted lists only if in_memory is True
        if self.config.in_memory:
            index_map = defaultdict(InvertedList)
            for term, term_stats in lookup_table.items():
                inverted_list = index_map[term]
                inverted_list_binary = inverted_index.read_inverted_list_from_file(
                    inverted_lists_file, term_stats['posting_list_position'],
                    term_stats['posting_list_size'])
                inverted_list.bytearray_to_postings(inverted_list_binary,
                                                    compressed,
                                                    term_stats['df'])
            inverted_index.load_map(index_map)

        return inverted_index

    def create_document_vectors(self, inverted_index):
        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                self.config.document_vectors_file_name, 'wb') as file_buffer:
            data = self.load_data()
            doc_id = -1
            vocabulary = inverted_index.get_vocabulary()
            document_vectors = defaultdict(DocumentVector)
            for scene in data['corpus']:
                doc_id += 1
                document_vector = document_vectors[doc_id]
                document_vector.set_doc_id(doc_id)
                scene_text = scene['text']
                terms = list(filter(None, scene_text.split()))
                # Get the unique terms from the text as we need to calculate score for each term
                unique_terms = set(terms)
                for term in unique_terms:
                    term_id = vocabulary.index(term)
                    # Refer to Chapter 7, page 242 for the calculation below
                    fik = terms.count(term)
                    N = inverted_index.get_total_docs()
                    nk = inverted_index.get_df(term)
                    term_value = 0
                    if fik:
                        term_value = (math.log(fik) + 1) * \
                            math.log((N + 1) / (nk + 0.5))
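                    # In symbols: w(k, d) = (1 + ln f_ik) * ln((N + 1) / (n_k + 0.5)),
                    # a log-scaled term frequency times a smoothed inverse document
                    # frequency, with f_ik, N and n_k as bound above.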
                    # Add an entry with this term_id, term_value pair to the doc vector for this doc
                    document_vector.add_doc_vector_entry(term_id, term_value)

                numerator = np.array(
                    list(document_vector.get_doc_vector().values()))
                denominator = np.linalg.norm(numerator)
                normalized_vector = numerator / denominator
                for idx, term_id in enumerate(
                        list(document_vector.get_doc_vector().keys())):
                    normalized_term_value = float(normalized_vector[idx])
                    document_vector.add_doc_vector_entry(
                        term_id, normalized_term_value)
                doc_vector_binary, size_in_bytes = document_vector.vector_to_bytearray()
                position_in_file = file_buffer.tell()
                file_buffer.write(doc_vector_binary)
                doc_meta = inverted_index.get_doc_meta(doc_id)
                doc_meta['document_vector_position'] = position_in_file
                doc_meta['document_vector_size'] = size_in_bytes
                inverted_index.update_docs_meta(doc_id, doc_meta)

        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                self.config.docs_meta_file_name, 'w') as f:
            json.dump(inverted_index.get_docs_meta(), f)

    def get_document_vectors(self, inverted_index):
        number_of_docs = inverted_index.get_total_docs()
        document_vectors = defaultdict(DocumentVector)
        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                self.config.document_vectors_file_name,
                'rb') as document_vectors_file:
            for doc_id in range(number_of_docs):
                doc_meta = inverted_index.get_doc_meta(doc_id)
                position_in_file = doc_meta['document_vector_position']
                size_in_bytes = doc_meta['document_vector_size']
                document_vectors_file.seek(position_in_file)
                document_vector = document_vectors[doc_id]
                document_vector.set_doc_id(doc_id)
                document_vector_binary = bytearray(
                    document_vectors_file.read(size_in_bytes))
                document_vector.bytearray_to_vector(document_vector_binary,
                                                    size_in_bytes)
        return document_vectors

    def create_prior(self, inverted_index, prior_type):
        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                prior_type + '_priors', 'wb') as file_buffer:
            total_docs = inverted_index.get_total_docs()
            random.seed(0)
            prior = 0
            # Initialize an empty bytearray
            prior_binary = bytearray()
            size_in_bytes = 0
            format_prior = '<d'
            for doc_id in range(total_docs):
                if prior_type == 'uniform':
                    prior = math.log(1 / total_docs)
                else:
                    prior = math.log(random.random())
                # Convert prior to binary using little-endian byte-order and float format (8 bytes)
                prior_binary += struct.pack(format_prior, prior)
                size_in_bytes += struct.calcsize(format_prior)
            file_buffer.write(prior_binary)

    def dump_inverted_lists_to_disk(self, file_buffer, inverted_index):
        """
        Stores the inverted lists on disk.

        :param file_buffer: buffer for the inverted lists file
        :param inverted_index: instance of the inverted index being used
        """
        for term, inverted_list in inverted_index.get_map().items():
            position_in_file = file_buffer.tell()
            inverted_list_binary, size_in_bytes = inverted_list.postings_to_bytearray(
                inverted_index.compressed)
            file_buffer.write(inverted_list_binary)
            inverted_index.update_lookup_table(term, position_in_file,
                                               size_in_bytes)

    def dump_inverted_index_to_disk(self, inverted_index):
        """
        Stores the docs meta, configuration, lookup table and inverted lists on disk.

        :param inverted_index: instance of the inverted index being used
        """
        # Create the index directory if it doesn't exist
        if not os.path.exists(self.root_dir + '/' + self.config.index_dir):
            os.mkdir(self.root_dir + '/' + self.config.index_dir)

        if not self.config.compressed:
            # Create uncompressed index directory if it doesn't exist
            if not os.path.exists(self.root_dir + '/' + self.config.index_dir +
                                  '/' + self.config.uncompressed_dir):
                os.mkdir(self.root_dir + '/' + self.config.index_dir + '/' +
                         self.config.uncompressed_dir)

            with open(
                    self.root_dir + '/' + self.config.index_dir + '/' +
                    self.config.uncompressed_dir + '/' +
                    self.config.inverted_lists_file_name, 'wb') as f:
                self.dump_inverted_lists_to_disk(f, inverted_index)

            with open(
                    self.root_dir + '/' + self.config.index_dir + '/' +
                    self.config.uncompressed_dir + '/' +
                    self.config.lookup_table_file_name, 'w') as f:
                json.dump(inverted_index.get_lookup_table(), f)

        if self.config.compressed:
            # Create compressed index directory if it doesn't exist
            if not os.path.exists(self.root_dir + '/' + self.config.index_dir +
                                  '/' + self.config.compressed_dir):
                os.mkdir(self.root_dir + '/' + self.config.index_dir + '/' +
                         self.config.compressed_dir)

            with open(
                    self.root_dir + '/' + self.config.index_dir + '/' +
                    self.config.compressed_dir + '/' +
                    self.config.inverted_lists_file_name, 'wb') as f:
                self.dump_inverted_lists_to_disk(f, inverted_index)

            with open(
                    self.root_dir + '/' + self.config.index_dir + '/' +
                    self.config.compressed_dir + '/' +
                    self.config.lookup_table_file_name, 'w') as f:
                json.dump(inverted_index.get_lookup_table(), f)

        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                self.config.collection_stats_file_name, 'w') as f:
            json.dump(inverted_index.get_collection_stats(), f)

        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                self.config.docs_meta_file_name, 'w') as f:
            json.dump(inverted_index.get_docs_meta(), f)

        with open(
                self.root_dir + '/' + self.config.index_dir + '/' +
                self.config.config_file_name, 'w') as f:
            json.dump(self.config.get_params(), f)

    def remove_inverted_index_from_memory(self, inverted_index):
        """
        Releases the inverted index's in-memory postings map to free memory.

        :param inverted_index: instance of the inverted index being used
        """
        inverted_index.delete_map()
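Since create_prior above packs every document's log-prior as a fixed-width little-endian double ('<d', 8 bytes), a reader can seek straight to any document's record without an extra index. A minimal sketch of reading one prior back, assuming the same file layout (the function name and path handling are illustrative):

import struct

def read_prior(priors_path, doc_id, fmt='<d'):
    # Each record is struct.calcsize('<d') == 8 bytes, so the record for
    # doc_id starts at byte offset doc_id * record_size
    record_size = struct.calcsize(fmt)
    with open(priors_path, 'rb') as f:
        f.seek(doc_id * record_size)
        (prior,) = struct.unpack(fmt, f.read(record_size))
    return prior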
Beispiel #47
0
from Cecilio import *
from Config import Config

if __name__ == "__main__":
    run(Config(meeting_distances_config, commonConfiguration, meet_object_v2))
Beispiel #48
0
def test_engine_search():
    from GeneratingDataset import DummyDataset
    seq_len = 5
    n_data_dim = 2
    n_classes_dim = 3
    dataset = DummyDataset(input_dim=n_data_dim,
                           output_dim=n_classes_dim,
                           num_seqs=2,
                           seq_len=seq_len)
    dataset.init_seq_order(epoch=1)

    config = Config()
    config.update({
        "model": "/tmp/model",
        "batch_size": 5000,
        "num_outputs": n_classes_dim,
        "num_inputs": n_data_dim,
        "network": {
            "output": {
                "class": "rec",
                "from": [],
                "max_seq_len": 10,
                "target": "classes",
                "unit": {
                    "prob": {
                        "class": "softmax",
                        "from": ["prev:output"],
                        "loss": "ce",
                        "target": "classes"
                    },
                    "output": {
                        "class": "choice",
                        "beam_size": 4,
                        "from": ["prob"],
                        "target": "classes",
                        "initial_output": 0
                    },
                    "end": {
                        "class": "compare",
                        "from": ["output"],
                        "value": 0
                    }
                }
            },
            "decision": {
                "class": "decide",
                "from": ["output"],
                "loss": "edit_distance"
            }
        }
    })
    engine = Engine(config=config)
    # Normally init_network can be used. We only do init_train here to randomly initialize the network.
    engine.init_train_from_config(config=config,
                                  train_data=dataset,
                                  dev_data=None,
                                  eval_data=None)
    print("network:")
    pprint(engine.network.layers)
    assert "output" in engine.network.layers
    assert "decision" in engine.network.layers

    engine.search(dataset=dataset)
    print("error keys:")
    pprint(engine.network.error_by_layer)
    assert engine.network.total_objective is not None
    assert "decision" in engine.network.error_by_layer

    engine.finalize()
Beispiel #49
0
def test_cudnn_save_restore():
    import tempfile, shutil, os
    from tensorflow.python.training.saver import BaseSaverBuilder
    model_tmp_dir = tempfile.mkdtemp("tmp-checkpoint")
    model_filename = model_tmp_dir + "/model"
    try:
        num_inputs = 4
        input_data = numpy.array(
            [[[1, -0.2, 0.3, -4], [2, -0.6, 0.7, -1.8], [1, 0.3, -0.1, -0.8],
              [0.1, -0.2, 0.2, .8]]],
            dtype="float32")
        seq_lens = numpy.array([4], dtype="int32")
        assert input_data.shape == (1, seq_lens[0], num_inputs)
        num_outputs = 3

        print("Storing network with cuDNN.")
        tf.reset_default_graph()
        with tf.Session() as session:
            config1 = Config()
            config1.update({
                "num_outputs": num_outputs,
                "num_inputs": num_inputs,
                "network": {
                    "layer1": {
                        "class": "rec",
                        "n_out": 6,
                        "unit": "CudnnLSTM"
                    },
                    "layer2": {
                        "class": "rec",
                        "n_out": 6,
                        "unit": "CudnnLSTM",
                        "from": ["layer1"]
                    },
                    "output": {
                        "class": "linear",
                        "activation": None,
                        "n_out": num_outputs,
                        "from": ["layer2"]
                    }
                }
            })
            network1 = TFNetwork(config=config1, train_flag=True)
            network1.construct_from_dict(config1.typed_dict["network"])
            network1.initialize_params(session=session)
            params = {
            }  # type: dict[str,dict[str,numpy.ndarray]]  # layer -> param -> numpy.ndarray
            for layer_name, layer1 in sorted(network1.layers.items()):
                print("layer: %r" % layer_name)
                assert isinstance(layer1, LayerBase)
                params[layer_name] = {}
                for param_name, param1 in sorted(layer1.params.items()):
                    print("  param %r: %r" % (param_name, param1))
                    params[layer_name][param_name] = param1.eval(session)
                    if param1 in layer1.saveable_param_replace:
                        saveable_object = layer1.saveable_param_replace[param1]
                        print("    saveable object: %r" % saveable_object)
                        assert isinstance(saveable_object,
                                          BaseSaverBuilder.SaveableObject)
                        print("      op: %r" % saveable_object.op)
                        print("      name: %r" % saveable_object.name)
                        for spec in saveable_object.specs:
                            print("      spec: %r" % spec)
                            assert isinstance(spec, BaseSaverBuilder.SaveSpec)
                            print("        name: %r" % spec.name)
                            print("        tensor: %r" % spec.tensor)
            output_data1 = session.run(
                network1.get_default_output_layer().output.placeholder,
                feed_dict={
                    network1.extern_data.data["data"].placeholder: input_data,
                    network1.extern_data.data["data"].size_placeholder[0]:
                    seq_lens
                })
            assert_equal(output_data1.shape,
                         (seq_lens[0], 1, num_outputs))  # (time, batch, dim)
            print("Saveable params:", network1.get_saveable_params_list())
            network1.save_params_to_file(filename=model_filename,
                                         session=session)
        print()

        # First test if we can load the same network as-is. This will involve the RNNParamsSaveable.
        print("Testing restore of same network with cuDNN.")
        tf.reset_default_graph()
        with tf.Session() as session:
            network1a = TFNetwork(config=config1, train_flag=True)
            network1a.construct_from_dict(config1.typed_dict["network"])
            network1a.load_params_from_file(filename=model_filename,
                                            session=session)
            for layer_name, layer1 in sorted(network1a.layers.items()):
                print("layer: %r" % layer_name)
                for param_name, param1 in sorted(layer1.params.items()):
                    print("  param %r: %r" % (param_name, param1))
                    param1old = params[layer_name][param_name]
                    param1new = param1.eval(session)
                    numpy.testing.assert_almost_equal(param1old, param1new)
            output_data1a = session.run(
                network1a.get_default_output_layer().output.placeholder,
                feed_dict={
                    network1a.extern_data.data["data"].placeholder:
                    input_data,
                    network1a.extern_data.data["data"].size_placeholder[0]:
                    seq_lens
                })
            numpy.testing.assert_almost_equal(output_data1, output_data1a)
        print()

        print("Testing restore of network with LSTMBlockCell.")
        tf.reset_default_graph()
        with tf.Session() as session:
            # Now, in CPU, we would automatically use LSTMBlockCell instead.
            # Check if the import of the model works correctly in load_params_from_file().
            config2 = Config()
            config2.update({
                "num_outputs": num_outputs,
                "num_inputs": num_inputs,
                "network": {
                    "layer1": {
                        "class": "rec",
                        "n_out": 6,
                        "unit": "LSTMBlockFused"
                    },
                    "layer2": {
                        "class": "rec",
                        "n_out": 6,
                        "unit": "LSTMBlockFused",
                        "from": ["layer1"]
                    },
                    "output": {
                        "class": "linear",
                        "activation": None,
                        "n_out": num_outputs,
                        "from": ["layer2"]
                    }
                }
            })
            network2 = TFNetwork(config=config2, train_flag=True)
            network2.construct_from_dict(config2.typed_dict["network"])
            network2.load_params_from_file(filename=model_filename,
                                           session=session)
            output_data2 = session.run(
                network2.get_default_output_layer().output.placeholder,
                feed_dict={
                    network2.extern_data.data["data"].placeholder: input_data,
                    network2.extern_data.data["data"].size_placeholder[0]:
                    seq_lens
                })
            # Not sure if something is incorrect... only decimal=2 works.
            numpy.testing.assert_almost_equal(output_data1,
                                              output_data2,
                                              decimal=2)

    finally:
        shutil.rmtree(model_tmp_dir)
Beispiel #50
0
def test_engine_search_attention():
    from GeneratingDataset import DummyDataset
    seq_len = 5
    n_data_dim = 2
    n_classes_dim = 3
    dataset = DummyDataset(input_dim=n_data_dim,
                           output_dim=n_classes_dim,
                           num_seqs=2,
                           seq_len=seq_len)
    dataset.init_seq_order(epoch=1)
    print("Hello search!")

    config = Config()
    config.update({
        "model": "/tmp/model",
        "batch_size": 5000,
        "max_seqs": 2,
        "num_outputs": n_classes_dim,
        "num_inputs": n_data_dim,
        "network": {
            "encoder": {
                "class": "linear",
                "activation": "tanh",
                "n_out": 5
            },
            "output": {
                "class": "rec",
                "from": [],
                "unit": {
                    'output': {
                        'class': 'choice',
                        'target': 'classes',
                        'beam_size': 4,
                        'from': ["output_prob"]
                    },
                    "end": {
                        "class": "compare",
                        "from": ["output"],
                        "value": 0
                    },
                    'orth_embed': {
                        'class': 'linear',
                        'activation': None,
                        'from': ['output'],
                        "n_out": 7
                    },
                    "s": {
                        "class": "rnn_cell",
                        "unit": "LSTMBlock",
                        "from": ["prev:c", "prev:orth_embed"],
                        "n_out": 7
                    },
                    "c_in": {
                        "class": "linear",
                        "activation": "tanh",
                        "from": ["s", "prev:orth_embed"],
                        "n_out": 5
                    },
                    "c": {
                        "class": "dot_attention",
                        "from": ["c_in"],
                        "base": "base:encoder",
                        "base_ctx": "base:encoder"
                    },
                    "output_prob": {
                        "class": "softmax",
                        "from": ["prev:s", "c"],
                        "target": "classes",
                        "loss": "ce"
                    }
                },
                "target": "classes",
                "max_seq_len": 10
            },
            "decision": {
                "class": "decide",
                "from": ["output"],
                "loss": "edit_distance"
            }
        }
    })
    engine = Engine(config=config)
    print("Init network...")
    engine.start_epoch = 1
    engine.use_dynamic_train_flag = False
    engine.use_search_flag = True
    engine.init_network_from_config(config)
    print("network:")
    pprint(engine.network.layers)
    assert "output" in engine.network.layers
    assert "decision" in engine.network.layers

    print("Search...")
    engine.search(dataset=dataset)
    print("error keys:")
    pprint(engine.network.error_by_layer)
    assert engine.network.total_objective is not None
    assert "decision" in engine.network.error_by_layer

    engine.finalize()
Beispiel #51
0
 def __init__(self, config=None, blocking=True):
   if not config:
     config = Config()
     config.update(dummyconfig_dict)
   super(DummyDevice, self).__init__(device="cpu", config=config, blocking=blocking)
Beispiel #52
0
import os
import time
import tensorflow as tf
import datetime as dt
import gym
import policy_gradient
from time import gmtime, strftime

from Config import Config

conf = Config()


def generate_predictions(ticker):

    global pg

    # Initialize environment and make prediction
    env = gym.make(conf.env_name)
    env = env.unwrapped
    prediction = pg.test_model(env)

    if prediction == 0:
        pred_str = 'BUY (0)'
    elif prediction == 1:
        pred_str = 'FLAT (1)'
    elif prediction == 2:
        pred_str = 'SELL (2)'
    else:
        # guard against a NameError on an unexpected action id
        pred_str = 'UNKNOWN ({})'.format(prediction)

    print('---------Predicted action is:')
    print(pred_str)
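If test_model ever returns an action id other than 0, 1 or 2, the if/elif chain above falls through to the else guard. A dict lookup is an equivalent, more compact mapping (a sketch reusing the labels from the code above):

action_names = {0: 'BUY (0)', 1: 'FLAT (1)', 2: 'SELL (2)'}
pred_str = action_names.get(prediction, 'UNKNOWN ({})'.format(prediction))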
Beispiel #53
0
    def __init__(self):
        '''Initialize the GUI.
        '''
        self.logger = logging.getLogger(__name__)
        self.logger.info("开始记录日志")
        try:
            self.logger.info("开始检查网络连接")
            # requests.get("https://www.baidu.com", verify=False)  # visit Baidu to check that the network is reachable
            # self.logger.info("Network connection succeeded")
            self.logger.info("开始加载配置文件")
            self.cf = Config()
            self.APP_ID = self.cf.APP_ID
            self.API_KEY = self.cf.API_KEY
            self.SECRET_KEY = self.cf.SECRET_KEY
            self.MK_APIKEY = self.cf.MK_APIKEY

            self.logger.info("开始加载窗体")
            self.window = tk.Tk()
            self.window.title('截图翻译工具(V4.5)')
            self.window.attributes("-alpha", 0.9)
            self.window['background'] = 'PowderBlue'

            self.filename = resource_path(
                os.path.join("res", "bitbug_favicon.ico"))

            self.window.iconbitmap(self.filename)

            self.window.resizable(0, 0)

            self.menubar = tk.Menu(self.window)

            self.filemenu = tk.Menu(self.menubar, tearoff=0)
            self.menubar.add_cascade(label='导入文件', menu=self.filemenu)
            self.filemenu.add_command(label='从文本文件导入', command=self.getFromTXT)
            self.filemenu.add_command(label='从图像文件导入',
                                      command=self.getFromImage)

            self.setmenu = tk.Menu(self.menubar, tearoff=0)
            self.menubar.add_cascade(label='设置', menu=self.setmenu)
            self.setmenu.add_command(label='查看APP_ID', command=self.getAppId)
            self.setmenu.add_command(label='生成配置文件', command=self.useConfig)

            self.helpmenu = tk.Menu(self.menubar, tearoff=0)
            self.menubar.add_cascade(label='帮助', menu=self.helpmenu)
            self.helpmenu.add_command(label='关于', command=self.aboutClick)
            self.helpmenu.add_command(label='更新与反馈', command=self.upClick)

            self.window.config(menu=self.menubar)

            tk.Label(self.window, text="原文:",
                     background='PowderBlue').grid(row=0,
                                                   column=0,
                                                   sticky=tk.W)

            self.comboxlist0 = ttk.Combobox(self.window)
            self.comboxlist0["values"] = ("使用谷歌翻译", "C/C++代码美化", "发布临时文字")
            self.comboxlist0.bind("<<ComboboxSelected>>", self.comboxlist0_msg)

            self.comboxlist0["state"] = "readonly"
            self.comboxlist0.current(0)
            self.comboxlist0.grid(row=0,
                                  column=0,
                                  columnspan=2,
                                  padx=38,
                                  sticky=tk.W)

            self.comboxlist = ttk.Combobox(self.window)
            self.comboxlist["values"] = ("翻译为中文(简体)", "翻译为中文(繁体)", "翻译为英文",
                                         "翻译为日文", "翻译为韩文")
            self.comboxlist["state"] = "readonly"
            self.comboxlist.current(0)
            self.comboxlist.grid(row=0, column=1, sticky=tk.E)

            self.inputText = tk.Text(self.window, height=12, width=50)
            self.inputText.grid(row=1, column=0, columnspan=2, padx=8, pady=8)

            self.inputTextsection = section()
            self.inputTextsection.s = self.inputText
            self.inputTextmenu = tk.Menu(self.window, tearoff=0)
            self.inputTextmenu.add_command(
                label="复制", command=self.inputTextsection.onCopy)
            self.inputTextmenu.add_separator()
            self.inputTextmenu.add_command(
                label="粘贴", command=self.inputTextsection.onPaste)
            self.inputTextmenu.add_separator()
            self.inputTextmenu.add_command(label="剪切",
                                           command=self.inputTextsection.onCut)
            self.inputText.bind(
                "<Button-3>", lambda event: self.inputTextmenu.post(
                    event.x_root, event.y_root))

            tk.Label(self.window, text="译文:",
                     background='PowderBlue').grid(row=2,
                                                   column=0,
                                                   sticky=tk.W)

            self.translateBtn = tk.Button(self.window,
                                          text="翻译",
                                          bg='Lavender',
                                          command=self.translateBtnClick)
            self.translateBtn.grid(row=2, column=0, padx=40, sticky=tk.W)

            self.OCRLANGUAGE = 'eng'
            self.comboxlist1 = ttk.Combobox(self.window)
            self.comboxlist1["values"] = ("使用百度OCR", "使用OCRMAKER")
            self.comboxlist1.bind("<<ComboboxSelected>>", self.comboxlist1_OCR)
            self.comboxlist1["state"] = "readonly"
            self.comboxlist1.current(0)
            self.comboxlist1.grid(row=2,
                                  column=0,
                                  padx=80,
                                  columnspan=2,
                                  sticky=tk.W)

            self.clearBtn = tk.Button(self.window,
                                      text="清屏",
                                      bg='Lavender',
                                      command=self.clearBtnClick)
            self.clearBtn.grid(row=2, column=1, padx=90, sticky=tk.E)

            self.cb1var = tk.IntVar()
            self.cb1var.set(1)
            self.cb1 = tkinter.Checkbutton(self.window,
                                           text='监听剪切板',
                                           background='PowderBlue',
                                           variable=self.cb1var)
            self.cb1.grid(row=2, column=0, columnspan=2, sticky=tk.E)

            self.outText = tk.Text(self.window, height=12, width=50)
            self.outText.grid(row=3, column=0, columnspan=2, padx=8, pady=8)

            self.outTextsection = section()
            self.outTextsection.s = self.outText
            self.outTextmenu = tk.Menu(self.window, tearoff=0)
            self.outTextmenu.add_command(label="复制",
                                         command=self.outTextsection.onCopy)
            self.outTextmenu.add_separator()
            self.outTextmenu.add_command(label="粘贴",
                                         command=self.outTextsection.onPaste)
            self.outTextmenu.add_separator()
            self.outTextmenu.add_command(label="剪切",
                                         command=self.outTextsection.onCut)
            self.outText.bind(
                "<Button-3>", lambda event: self.outTextmenu.post(
                    event.x_root, event.y_root))
            # self.window.bind(sequence="<Key>", func=self.outTexeP)

            self.window.state('normal')
            self.window.wm_attributes('-topmost', 1)

            self.window.protocol("WM_DELETE_WINDOW", self.on_closing)
            self.menu_options = (("显示", None, self.showwin), )
            systray = SysTrayIcon(self.filename,
                                  "截图翻译工具",
                                  self.menu_options,
                                  on_quit=self.on_quit_callback)
            systray.start()
            self.logger.info("窗体加载成功")

            self.logger.info("开始加载插件模块")
            self.maindll = WinDLL(
                resource_path(os.path.join("res", "pymain.dll")))
            dlllist = [
                i for i in glob.glob(r"*.dll") if i[0] == 'p' and i[1] == 'y'
            ]
            self.logger.info("搜寻到如下插件{0}".format(str(dlllist)))
            self.logger.info("开始载入dll")
            self.dlllist = []
            s = create_string_buffer(''.encode(), 8192)  # the dll-side buffer is 8192 bytes
            for i in dlllist:
                self.logger.info("正在载入{0}".format(i))
                dll = WinDLL(os.path.join(os.getcwd(), i))
                self.dlllist.append(dll)
                self.logger.info("载入{0}成功".format(i))
                self.logger.info("正在获取{0}的名字".format(i))
                s = create_string_buffer(''.encode(), 9000)  # the dll-side buffer is 8192 bytes; allocate a little extra
                dll.name(s)
                s = string_at(s).decode('utf-8')
                self.logger.info("{0}的名字为:{1}".format(i, s))
                self.comboxlist0["values"] = self.comboxlist0["values"] + (s, )

            self.logger.info("开始启动插件事件监听线程")
            _thread.start_new_thread(self.apiloop, ("", ))  # listener loop for plugin events
            self.logger.info("插件事件监听线程启动成功")
            self.logger.info("开始启动剪切板事件监听线程")
            _thread.start_new_thread(
                self.thread_fun, (self.window, self.inputText, self.outText))
            self.logger.info("剪切板事件监听线程启动成功")
            self.logger.info("进入窗体消息循环")
            self.window.mainloop()
        except Exception as e:
            self.logger.error(str(e))
            tk.messagebox.showerror(
                title="致命错误",
                message=
                "请将具体信息发送给开发者:\n网页:https://github.com/super1207/translate\n错误信息:"
                + str(e))
Beispiel #54
0
 def getLimitValue(self, limit_name):
     config = Config.getInstance()
     limit_type = self._convertStringToEnum(ELimitType, limit_name)
     return config.getLimitValue(limit_type)
Beispiel #55
0
"""
Wrap the Mosquitto (paho) MQTT client and provide the basics for receiving data from
and sending data to an MQTT broker; handles client and topic registrations.
"""

from Config import Config
import paho.mqtt.client as mqtt

conf = Config().get_config()


class _MessageHandler(object):
    def __init__(self, broker_address, broker_port=1883, keep_alive=30):
        self.broker_address = broker_address
        self.broker_port = broker_port
        self.keep_alive = keep_alive
        self.client = None
        self.is_connected = False

        try:
            self._connect()
        except ConnectionRefusedError:
            print(conf.get('msg', 'mqtt_error'))

    def _connect(self):
        self.client = mqtt.Client()
        self.client.on_connect = self._on_connect_callback
        self.client.on_disconnect = self._on_disconnect_callback

        try:
            self.client.connect(self.broker_address, self.broker_port,
                                self.keep_alive)
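The two callbacks wired up in _connect are not part of this excerpt. Under the paho-mqtt 1.x callback signatures they might look like the following sketch (the method names match the assignments above; the bodies are assumptions):

    def _on_connect_callback(self, client, userdata, flags, rc):
        # rc == 0 means the broker accepted the connection
        self.is_connected = (rc == 0)

    def _on_disconnect_callback(self, client, userdata, rc):
        self.is_connected = False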
Beispiel #56
0
 def getGlobalState(self, state_name):
     config = Config.getInstance()
     state_type = self._convertStringToEnum(EGlobalStateType, state_name)
     return config.getGlobalState(state_type)
Beispiel #57
0
def train_label_none_label_classification(label_folder,
                                          non_label_folder,
                                          model_file=None):

    c = Config()

    #  Build or load model
    if model_file is None:
        # create model
        img_input = Input(shape=(28, 28, 3))
        # prediction = model_cnn_2_layer.nn_classify_label_non_label(img_input)
        # prediction = model_cnn_3_layer.nn_classify_label_non_label(img_input)
        prediction = nn_cnn_3_layer.nn_classify_label_non_label(img_input)
        model = Model(inputs=img_input, outputs=prediction)
        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['accuracy'])
    else:
        model = load_model(model_file)

    model.summary()

    # Load and normalize data
    x_train, y_train, x_test, y_test = load_train_validation_data(
        label_folder, non_label_folder)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    x_train[:, :, :, 0] -= c.img_channel_mean[0]
    x_train[:, :, :, 1] -= c.img_channel_mean[1]
    x_train[:, :, :, 2] -= c.img_channel_mean[2]
    x_test[:, :, :, 0] -= c.img_channel_mean[0]
    x_test[:, :, :, 1] -= c.img_channel_mean[1]
    x_test[:, :, :, 2] -= c.img_channel_mean[2]

    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # x_train.reshape(x_train.shape[0], 28, 28, 3)
    # x_test.reshape(x_test.shape[0], 28, 28, 3)

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, 2)
    y_test = keras.utils.to_categorical(y_test, 2)

    # Checkpointing saves the network weights only when there is an improvement in classification
    # accuracy on the validation dataset (monitor='val_acc' and mode='max').
    file_path = "weights-improvement-{epoch:04d}-{val_acc:.4f}.hdf5"
    checkpoint = ModelCheckpoint(file_path,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=100,
              verbose=1,
              callbacks=callbacks_list,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    model.save('final_model.h5')
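Note: this snippet assumes an older Keras. From standalone Keras 2.3 on (and in tf.keras) the logged metric key is 'val_accuracy' rather than 'val_acc', in which case the checkpoint above would only warn that 'val_acc' is unavailable and never save; under that assumption, both the monitor argument and the placeholder in file_path need renaming:

file_path = "weights-improvement-{epoch:04d}-{val_accuracy:.4f}.hdf5"
checkpoint = ModelCheckpoint(file_path,
                             monitor='val_accuracy',
                             verbose=1,
                             save_best_only=True,
                             mode='max')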
Beispiel #58
0
 def setGlobalState(self, state_name, state_value):
     config = Config.getInstance()
     state_type = self._convertStringToEnum(EGlobalStateType, state_name)
     config.setGlobalState(state_type, state_value)
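The _convertStringToEnum helper shared by Beispiel #54, #56 and #58 is not part of this excerpt. If ELimitType and EGlobalStateType are standard Python Enums, a plausible sketch is simply a member lookup by name (this is a guess, not the original implementation):

 def _convertStringToEnum(self, enum_type, name):
     # Enum classes support lookup by member name; raises KeyError for unknown names
     return enum_type[name]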
Beispiel #59
0

from Config import Config
from GraphUtil import convNodeToProblems, convAllNodeToProblems
from MasterEncoder import MasterEncoder
import warnings
from Fingerprint import Fingerprint
from CompDatabase import CompDatabase
from OtherEncoders import ValueEncoder
from tqdm import tqdm
import numpy as np

warnings.filterwarnings('ignore')
CF = Config()


class PrepDataset:
    def __init__(self, graphBasePath="graph/"):
        self.mode = "Training"
        self.graphList = []
        self.parallel = True

    #set compound encoder
    def setCompoundEncoder(self, compFilename):
        """
        compFilename: path to a csv file containing SMILES strings.
        Calculates descriptors and saves the results as self.CompDat.
        """
Beispiel #60
0
    def create_profile(self, profile):
        config = Config()
        expdate = profile.exp_date.split()
        exp_month = expdate[0]

        # Strip a leading zero from the month for the "value" field, e.g. "08" -> "8"
        if exp_month.startswith('0'):
            exp_month_value = exp_month[1]
        else:
            exp_month_value = exp_month
        exp_year = f'20{expdate[2]}'

        # Shipping and billing share the same address block
        address = {
            "first_name": profile.first_name,
            "last_name": profile.last_name,
            "address_line_1": profile.address,
            "address_line_2": profile.apt,
            "company": "",
            "state": {
                "label": profile.state,
                "value": config.us_state_abbreviations[profile.state]
            },
            "country": {
                "label": profile.country,
                "value": config.country_abbrev[profile.country]
            },
            "phone": profile.phone,
            "city": profile.city,
            "zipcode": profile.zip_code
        }

        uuid = '-'.join([
            config.get_random_string_lowercase(8),
            config.get_random_string_lowercase(4),
            config.get_random_string_lowercase(4),
            config.get_random_string_lowercase(4),
            config.get_random_string_lowercase(12)
        ])

        profile_data = [{
            "name": profile.profile_name,
            "uuid": uuid,
            "email": profile.email,
            "shipping": address,
            "billing": dict(address),
            "card": {
                "number": profile.card_number,
                "expiry": {
                    "month": {"label": exp_month, "value": exp_month_value},
                    "year": {"label": exp_year, "value": exp_year}
                },
                "cvv": profile.cvv
            },
            "sameAddress": True,
            "id": config.get_random_string_uppercase(9)
        }]

        # json.dumps (requires "import json" at module level, alongside the existing
        # StringIO import) produces valid JSON directly, and is safe for values that
        # happen to contain quotes, ": " or ", "; the compact separators match the
        # output format expected by the consumer
        return StringIO(json.dumps(profile_data, separators=(',', ':')))
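A quick sanity check of the generated profile (handler and profile stand in for an instance of the class above and a filled-in profile object; both names are assumptions):

import json

buf = handler.create_profile(profile)
data = json.loads(buf.getvalue())
assert data[0]['sameAddress'] is True
assert data[0]['card']['cvv'] == profile.cvv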