Example no. 1
    async def connect(self):
        """
		Connect to the desired channel. This should be called immediately after creating the object.
		
		:raises TimeoutConnectingError: Timed out connecting to the specified channel.
		:raises discord.opus.OpusNotLoaded: The Opus library needed for voice connectivity is not loaded.
		:raises GuildAlreadyConnected: There is already another active voice connection in the target guild.
		:rtype: None
		"""
        global _active_ifaces
        try:
            self._client = await self.channel.connect(timeout=5)
        except asyncio.TimeoutError:
            raise TimeoutConnectingError(
                "Could not connect to the channel in time")
        except discord.opus.OpusNotLoaded:
            # Commands we run should check for this, so just re-raise.
            raise
        except discord.ClientException:
            # We're already connected somewhere else in this guild. That's the
            # fault of the VoiceManager object
            raise GuildAlreadyConnected()
        except Exception:
            log.critical(
                "modules.music.interface.MusicInterface: UNKNOWN EXCEPTION connecting to a VoiceChannel. "
                "Perhaps the documentation is incomplete?")
            # Unknown failure mode; nothing sensible to do locally, so re-raise.
            raise

        self.initialized = True
        _active_ifaces[self.channel] = self
        self._client.play(sources.EmptySource())  # Clear a TX bug when joining
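A hedged usage sketch of the docstring's contract, inside an async context (the class and variable names are assumptions for illustration, not from the source):

    # Hypothetical caller: per the docstring, connect right after construction.
    iface = MusicInterface(voice_channel)
    await iface.connect()  # may raise TimeoutConnectingError, GuildAlreadyConnected, OpusNotLoaded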
Example no. 2
async def run_redis(redis_hostport, broadcast):
    log.info("Connecting to redis:", redis=redis_hostport)
    host, port = redis_hostport.split(":")
    connection = await asyncio_redis.Connection.create(host=host,
                                                       port=int(port))
    log.info("Connected to redis", redis=redis_hostport)
    subscriber = await connection.start_subscribe()
    await subscriber.subscribe([THE_HEADS_EVENTS])

    while True:
        try:
            reply = await subscriber.next_published()
            msg = json.loads(reply.value)

            data = msg['data']
            src = data.get('cameraName') or data.get(
                'headName') or data['name']

            REDIS_MESSAGE_RECEIVED.labels(
                reply.channel,
                msg['type'],
                src,
            ).inc()

            if msg['type'] == "motion-detected":
                broadcast("motion-detected",
                          camera_name=data["cameraName"],
                          position=data["position"])

            if msg['type'] in ("head-positioned", "active", "kinect"):
                broadcast(msg['type'], msg=msg)

        except Exception as e:
            log.critical("Exception processing redis message", exception=e)
Example no. 3
def get_all_directories_with_name(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the directories in "look_in_dir" whose names contain "base_name".

    Returns: a list of strings

    Usage:     iterate_over = get_all_directories_with_name(dir, name)
    then:      for swwfile in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    if look_in_dir == "":
        look_in_dir = "."                                  # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x for x in dir_ls if base_name in x]

    if len(iterate_over) == 0:
        msg = 'No directories with the base name %s' % base_name
        raise IOError(msg)

    if verbose: log.critical('iterate over %s' % iterate_over)

    return iterate_over
Example no. 4
def get_all_swwfiles(look_in_dir='', base_name='', verbose=False):
    '''
    Finds all the sww files in "look_in_dir" whose names contain "base_name".
    Will accept base_name with or without the extension ".sww".

    Returns: a list of strings

    Usage:     iterate_over = get_all_swwfiles(dir, name)
    then
               for swwfile in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # split off the extension so it can be validated
    name, extension = os.path.splitext(base_name)

    if extension != '' and extension != '.sww':
        msg = 'file %s%s must be a NetCDF sww file!' % (base_name, extension)
        raise IOError(msg)

    if look_in_dir == "":
        look_in_dir = "."                                   # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x[:-4] for x in dir_ls if name in x and x[-4:] == '.sww']
    if len(iterate_over) == 0:
        msg = 'No files of the base name %s' % name
        raise IOError(msg)

    if verbose: log.critical('iterate over %s' % iterate_over)

    return iterate_over
Example no. 5
def write(filename_or_path: Union[str, Path],
          text: str,
          *,
          display=False) -> None:
    """Write text to a given file and optionally log it."""
    if isinstance(filename_or_path, Path):
        path = filename_or_path
    else:
        path = Path(filename_or_path).resolve()
        text = dedent(text)

    message = f"Writing file: {path}"
    line = "=" * (31 + len(message))
    if text:
        content = text.replace(" \n", "␠\n")
    else:
        content = "∅\n"
    if display:
        log.debug(message + "\n" + line + "\n" + content + line)
    else:
        log.critical(message)

    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(text)
    time.sleep(
        settings.WRITE_DELAY)  # ensure the file modification time changes
Example no. 6
 def before_daemonize(self):
     log.info("Starting inGraph-cleanup daemon..")
     try:
         databse_config = file_config('ingraph-database.conf')
     except IOError, e:
         log.critical(e)
         sys.exit(1)
Example no. 7
File: db.py Project: konlil/CarPot
 def __init__(self):
     super(DBSqlite, self).__init__()
     self.conn = sqlite3.connect(DB_NAME)
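     # NOTE: sqlite3.connect raises sqlite3.Error on failure rather than returning None, so the check below is defensive only.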
     if not self.conn:
         log.critical("create db connection failed. " + str(self))
     else:
         log.info("connection to db: %s" % DB_NAME)
Example no. 9
def error(exception, code=400):
    log.critical(str(exception))
    resp = {
        "message": str(exception),
        "code": code,
    }
    return jsonify(resp), code
Example no. 10
def copy_code_files(dir_name, filename1, filename2=None, verbose=False):
    """Copies "filename1" and "filename2" to "dir_name".

    Each 'filename' may be a string or list of filename strings.

    Filenames must be absolute pathnames
    """

    def copy_file_or_sequence(dest, file):
        if hasattr(file, '__iter__'):
            for f in file:
                shutil.copy(f, dir_name)
                if verbose:
                    log.critical('File %s copied' % f)
        else:
            shutil.copy(file, dir_name)
            if verbose:
                log.critical('File %s copied' % file)

    # check we have a destination directory, create if necessary
    if not os.path.isdir(dir_name):
        if verbose:
            log.critical('Make directory %s' % dir_name)
        os.mkdir(dir_name, 0o777)

    if verbose:
        log.critical('Output directory: %s' % dir_name)        

    copy_file_or_sequence(dir_name, filename1)

    if filename2 is not None:
        copy_file_or_sequence(dir_name, filename2)
Example no. 11
	def _AdjustTimeForRec__check(self):
		new_rec_start = self.new_rec.GetScheduledStartTime()
		for rec in self.current_rec_list:
			conflict_with_padding = self.new_rec.GetScheduledStartTime() < rec.GetScheduledStopTime()
			conflict_without_padding = self.new_rec.start_time < rec.stop_time
			if conflict_with_padding and not conflict_without_padding:
				if rec.stop_time:
					half_time = int((self.new_rec.start_time - rec.stop_time) / 2) + rec.stop_time
				else:
					half_time = int(self.new_rec.start_padding / 2) + self.new_rec.start_time
				if half_time > new_rec_start:
					new_rec_start = half_time
				self.result.append(rec)
				continue
			if rec.type == RECORDING_TYPE_NOT_SCHEDULED:
				self.result.append(rec)
				continue
			if self.new_rec.type == RECORDING_TYPE_NOT_SCHEDULED:
				log.debug('trying to start an instant record during padding of a scheduled')
				self.new_rec.status = RECORDING_STATUS_ABORTED
				continue
			self.new_rec.status = RECORDING_STATUS_ABORTED
			log.critical('SHOULD NOT HAPPEN => BUG => CHECK ALGO')
		filter(lambda rec: new_rec_start < rec.GetScheduledStopTime(), self.result)
		return new_rec_start
Example no. 12
def rmgeneric(path, func, verbose=False):
    ERROR_STR= """Error removing %(path)s, %(error)s """

    try:
        func(path)
        if verbose: log.critical('Removed %s' % path)
    except OSError as e:
        log.critical(ERROR_STR % {'path': path, 'error': e.strerror})
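For illustration, a hedged usage sketch (these call sites are assumptions; pass whichever removal callable matches the path):

    import os

    rmgeneric('/tmp/example.txt', os.remove)  # remove a file
    rmgeneric('/tmp/emptydir', os.rmdir)      # remove an empty directory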
Example no. 13
def logbreak(message: str = "") -> None:
    """Insert a noticeable logging record for debugging."""
    width = get_terminal_size().columns - 31
    if message:
        line = "-" * (width - len(message) - 1) + " " + message
    else:
        line = "-" * width
    log.critical(line)
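A minimal usage sketch (the output shape is inferred from the code above, not taken from the source):

    logbreak("loading fixtures")
    # logs a terminal-wide rule ending in the message, e.g.  ------------------ loading fixtures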
Example no. 14
	def long_help(self, cmd: str, mapping: Dict[str, str]):
		if cmd.strip() == "" or "" in list(mapping.values()) + list(mapping.keys()):
			# if any blank values, throw an error
			log.critical("Blank content in help message: running the help command with this blank content *will* throw an error. Traceback logged under debug level.")
			log.debug("".join(traceback.format_stack()))

		self._long_help[cmd] = mapping
		log.debug(f"registered new long_help entry for command {cmd}")
Example no. 15
def loadModule(location, module):
    try:
        module = __import__(location, "", "", module, -1)
    except ImportError as e:
        log.critical(e)
        return None
        
    return module
Example no. 16
async def set_mod_role():
    global admin_role
    try:
        admin_role = (
            client.get_guild(458765854624972811).get_role(458767248367157278),
            client.get_guild(458765854624972811).get_role(882726753074819124),
        )
    except AttributeError:
        log.critical("FAU mod role not found, oof")
Example no. 17
    def run(self):
        '''
        Core request function.

        :return (str or None): html, the fetched HTML source
        '''
        index = 0
        while index <= self.crawlConfig['maxtime']:
            try:
                try:
                    if self.isProxy or self.proxyPools:
                        proxy = self.get_proxy()
                        proxyHandler = urllib.request.ProxyHandler(proxy)
                        opener = urllib.request.build_opener(proxyHandler)
                    else:
                        opener = urllib.request.build_opener()
                    if not self.data:
                        req = urllib.request.Request(self.url,
                                                     headers=self.urlConfig)
                    else:
                        if self.dataType == 'json':
                            data = json.dumps(self.data)
                        else:
                            data = urllib.parse.urlencode(self.data)
                        data = data.encode('utf8')
                        req = urllib.request.Request(self.url,
                                                     headers=self.urlConfig,
                                                     data=data)
                    res = opener.open(req)
                    if res.status != 200:
                        raise Exception('status code is not 200 ! ')
                    if self.isBinary:
                        self.html = res.read()
                    else:
                        self.html = res.read().decode(
                            self.crawlConfig['encoding'], errors='ignore')
                    opener.close()
                    return self.html

                except http.client.BadStatusLine as e:
                    index += 1
                    log.error('BadStatusLine Error, URL:%s' % self.url)

                except urllib.error.URLError as e:
                    index += 0.2
                    log.error('URLError, URL:%s, ERROR:%s' %
                              (self.url, str(e)))

                except Exception as e:
                    index += 1
                    log.error('Other Error, URL:%s, ERROR:%s' %
                              (self.url, str(e)))
            except Exception as e:
                index += 1
                log.critical('...' + str(e))
        log.critical('Exceeded retry limit of %s, crawl failed, URL:%s' %
                     (self.crawlConfig['maxtime'], self.url))
        self.html = None
Example no. 18
File: db.py Project: konlil/CarPot
 def __init__(self, host, user, pwd, db):
     super(DBMSSql, self).__init__()
     log.info("connecting db: %s %s %s %s" % (host, user, pwd, db))
     self.conn = pymssql.connect(host=host, user=user, password=pwd, database=db, charset="utf8")
     cur = self.conn.cursor()
     if not cur:
         log.critical("create db connection failed." + str(self))
     else:
         log.info("connection to db: %s" % db)
Example no. 19
 def excepthook(exc_type, value, tb):
     msg = '\n' + '=' * 80
     msg += '\nUncaught exception:\n'
     msg += ''.join(traceback.format_exception(exc_type, value, tb))
     msg += '=' * 80 + '\n'

     print(msg)
     log.critical(msg)
     sys.exit(1)
Example no. 20
    def run(self, *args, **kwargs) -> None:
        self.first_execution = time.perf_counter()  # monotonic on both Windows and Linux which is :thumbsup:
        self.first_execution_dt = datetime.datetime.utcnow()
        if not kwargs.get("bot", True):
            log.fatal(
                "tried to login with a non-bot token (this framework is designed to run with a bot account)"
            )
            raise UserBotError("Non-bot accounts are not supported")

        # checks to make sure everything is a coroutine
        if config.debug:

            if any([
                    not asyncio.iscoroutinefunction(x)
                    for x in self._ready_handlers
            ]):
                log.critical("not all ready functions are coroutines")
                raise HandlerError("not all ready functions are coroutines")

            if any([
                    not asyncio.iscoroutinefunction(x)
                    for x in self._shutdown_handlers
            ]):
                log.critical("not all shutdown functions are coroutines")
                raise HandlerError("not all shutdown functions are coroutines")

            if any([
                    not asyncio.iscoroutinefunction(x)
                    for x in self._message_handlers
            ]):
                log.critical("not all message handlers are coroutines")
                raise HandlerError("not all message handlers are coroutines")

            if any([
                    not asyncio.iscoroutinefunction(x)
                    for x in self._member_join_handlers
            ]):
                log.critical("not all member join handlers are coroutines")
                raise HandlerError(
                    "not all member join handlers are coroutines")

            if any([
                    not asyncio.iscoroutinefunction(x)
                    for x in self._member_remove_handlers
            ]):
                log.critical("not all member leave handlers are coroutines")
                raise HandlerError(
                    "not all member leave handlers are coroutines")

            log.debug("all functions good to run (are coroutines)")

        log.info(
            f"Bot started at {str(self.first_execution_dt)} ({self.first_execution})"
        )
        super().run(*args, **kwargs)
Example no. 21
 def copy_file_or_sequence(dest, file):
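     # NOTE: excerpt of the nested helper from Example no. 10; dir_name and verbose come from the enclosing copy_code_files scope.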
     if hasattr(file, '__iter__'):
         for f in file:
             shutil.copy(f, dir_name)
             if verbose:
                 log.critical('File %s copied' % f)
     else:
         shutil.copy(file, dir_name)
         if verbose:
             log.critical('File %s copied' % file)
Example no. 22
    def start(self):
        while not self.urlQueue.empty():
            urlInfo = self.urlQueue.get()
            requestInfo = self.setRequestInfo(url=urlInfo['url'])
            html = self.getHtml(requestInfo)
            data = self.htmlParse(html, urlInfo)
            self.saveInfo('f', data, 'bsr_cate_' + str(self.taskId) + '.txt')

        print("任务结束")
        log.critical("亚马逊品类信息抓取完毕")
Example no. 23
def crash_info_excepthook(exctype, value, traceback):
    log.critical("""
  --- config.info: 
%s

  --- config.settings:
%s
  """, config.info_str(), config.settings_str())

    orig_excepthook(exctype, value, traceback)
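A minimal wiring sketch, assuming the hook is installed once at startup; the original hook must be captured first, since crash_info_excepthook chains to orig_excepthook:

    import sys

    orig_excepthook = sys.excepthook          # keep the default handler for chaining
    sys.excepthook = crash_info_excepthook    # route uncaught exceptions through the logger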
Example no. 24
    def run(self, instance_id):
        try:
            log.info(
                f"getting data from process memory with instance id {instance_id}"
            )
            head = self.get_head_of_process_memory(instance_id)
            log.info("extracting data from dataset")
            self.extract_head(head)
            log.info("getting items to persist")
            items = self.get_items_to_persist(self.entities, instance_id)
            log.info(f"should persist {len(items)} objects in database")
            instances = self.persist(items, self.scope)
            log.info("objects persisted")
            parts = self.event["name"].split(".")
            parts.pop()
            parts.append("done")
            name = ".".join(parts)
            log.info(f"pushing event {name} to event manager")
            operation = self.operationService.find_by_name_and_version(
                self.event["name"], self.event["version"])
            if not operation:
                raise Exception(
                    f"operation not found from event {self.event['name']} in version {self.event['version']}"
                )
            evt = {
                "name": operation.event_out,
                "version": operation.version,
                "idempotencyKey": self.event["idempotencyKey"],
                "systemId": self.event["systemId"],
                "tag": self.event["tag"],
                "instanceId": instance_id,
                "scope": self.event["scope"],
                "branch": self.event["branch"],
                "reprocessing": self.event["reprocessing"],
                "payload": {
                    "instance_id": instance_id
                }
            }
            event_manager.push(evt)
            #ReprocessingManager(self.process_id, self.instance_id).dispatch_reprocessing_events(instances)

        except Exception as e:
            event_manager.push({
                "name": "system.process.persist.error",
                "tag": self.event["tag"],
                "instanceId": instance_id,
                "payload": {
                    "instance_id": instance_id,
                    "origin": self.event
                }
            })
            log.info("exception occurred")
            log.critical(e)
            raise e
Example no. 25
	def basic_help(self, title: str, desc: str, include_prefix: bool = True):
		# check first that nothing's blank
		if title.strip() == "" or desc.strip() == "":
			log.critical("Blank content in help message: running the help command with this blank content *will* throw an error. Traceback logged under debug level.")
			log.debug("".join(traceback.format_stack()))

		if include_prefix:
			self._basic_help.update({f"{self.default_prefix}{title}": desc})
		else:
			self._basic_help.update({f"{title}": desc})
		log.debug(f"registered new basic_help entry under the title {title}")
Example no. 26
async def _task_wrapper(task, allow_cancel: bool):
    try:
        await task

    except asyncio.CancelledError:
        if allow_cancel:
            log.info("Task was cancelled", task=str(task))
        else:
            log.critical("Task was cancelled unexpectedly", task=str(task))

    except Exception as e:
        log.critical("uncaught exception", exception=str(e))
Example no. 27
def main():
    args = parse_argument()
    log.config(log.level(args.log_level))
    try:
        if args.command == 'update':
            download.lost_packages()
        elif args.command == 'range':
            download.start_working(files=(range(args.start, args.end+1)),
                                   threads=args.thread_n)
    except KeyboardInterrupt:
        log.debug('Forced crawler to stop...')
    except Exception:
        log.critical(bt_str(traceback))
Example no. 28
    async def _run_scenes(self, scenes):
        for scene in scenes:
            log.info(f"running scene", scene=scene.__name__)
            task: asyncio.Task = util.create_task(scene(self),
                                                  allow_cancel=True)

            timeout = timeouts(scene)

            try:
                await asyncio.wait_for(task, timeout=timeout)
            except asyncio.TimeoutError:
                pass
            except Exception as e:
                log.critical("scene caused exception", exception=str(e))
Example no. 29
	def queryTotal(self):
		# cache the result to cut down on database queries
		if self.pid in self.cache_stot:
			return self.cache_stot[self.pid]

		column_name = config.get('table', 'park_info_column_total')
		cmd = "select %s from %s where id=%d;"%(column_name, self.tblname, self.pid)
		cursor = db.obj.Exec(cmd)
		results = cursor.fetchone()
		if results and results[0] > 0:
			self.cache_stot[self.pid] = results[0]
			return results[0]
		else:
			log.critical("query part info failed, tblname: %s, id: 0x%0X" %( self.tblname, self.pid ))
			return 0
Example no. 30
def read(filename: str, *, display=False) -> str:
    """Read text from a file and optionally log it."""
    path = Path(filename).resolve()
    message = f'Reading file: {path}'
    line = '=' * (31 + len(message))
    text = path.read_text()
    if text:
        content = text.replace(' \n', '␠\n')
    else:
        content = '∅\n'
    if display:
        log.debug(message + '\n' + line + '\n' + content + line)
    else:
        log.critical(message)
    return text
Example no. 31
def construct_HpGridN(hpset):
    'factory'
    if hpset == 'grid1':
        return HpGrid1()
    elif hpset == 'grid2':
        return HpGrid2()
    elif hpset == 'grid3':
        return HpGrid3()
    elif hpset == 'grid4':
        return HpGrid4()
    elif hpset == 'grid5':
        return HpGrid5()
    else:
        log.critical('bad hpset value %s' % hpset)
        sys.exit(1)
Example no. 32
def main():
    args = parse_argument(sys.argv)
    log.config(log.level(args.log_level))

    crawler = master.Master(args.thread_n)
    try:
        crawler.pushUrls(args.urls)
        crawler.run()
    except KeyboardInterrupt:
        log.debug('Force wiki-crawler to stop...')
    except Exception as exception:
        log.critical(exception)
        log.critical(sys.exc_info())
    finally:
        crawler.shutdown()
Example no. 33
def read(filename: str, *, display=False) -> str:
    """Read text from a file and optionally log it."""
    path = Path(filename).resolve()
    message = f"Reading file: {path}"
    line = "=" * (31 + len(message))
    text = path.read_text()
    if text:
        content = text.replace(" \n", "␠\n")
    else:
        content = "∅\n"
    if display:
        log.debug(message + "\n" + line + "\n" + content + line)
    else:
        log.critical(message)
    return text
Example no. 34
	def startup(self, addr='0.0.0.0', port = 0):
		self.shutdown()
		self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
		self.set_reuse_addr()
		try:
			self.bind((addr, port))
		except:
			try: self.close()
			except: pass
			log.critical('server startup failed.')
			return -1
		self.listen(65536)
		self.state = gvars.NET_STATE_ESTABLISHED
		self.timeslap = int(time.time()*1000)
		log.info('server startup.')
		return 0
Example no. 35
def del_dir(path):
    """Recursively delete directory path and all its contents
    """

    if os.path.isdir(path):
        for file in os.listdir(path):
            X = os.path.join(path, file)

            if os.path.isdir(X) and not os.path.islink(X):
                del_dir(X)
            else:
                try:
                    os.remove(X)
                except:
                    log.critical("Could not remove file %s" % X)

        os.rmdir(path)
Example no. 36
def makeConnection():
    tries = 0
    while True:
        try:
            log.notice("Connecting to server %s:%i..." % (SERVER, PORT))
            irc.connect(SERVER, PORT)
            break
        except:
            if tries == 12:
                log.critical("Failed to establish a connection, giving up")
                return False
            timeout = pow(2, tries)
            log.notice("Connection failed, trying again in %i seconds" % timeout)
            time.sleep(timeout)
            tries += 1
    log.notice("Success!")
    return True
Example no. 37
	def startup(self, addr='0.0.0.0', port = 0):
		self.shutdown()
		self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		try:
			self.sock.bind((addr, port))
		except:
			try: self.sock.close()
			except: pass
			log.critical('server startup failed.')
			return -1
		self.sock.listen(65536)
		self.sock.setblocking(0)
		self.port = self.sock.getsockname()[1]
		self.state = gvars.NET_STATE_ESTABLISHED
		self.timeslap = int(time.time()*1000)
		log.info('server startup.')
		return 0
Example no. 38
    def __killall(self, signal):
        """
		Internal killall helper function
		"""
        if not self.is_alive():
            self.__dead = True
            self.stopping = False
            return False
        # child needs some assistance with dying ...
        try:
            # kill all applications with the string <appname> in their
            # commandline. This implementation uses the /proc filesystem,
            # it is Linux-dependent.
            unify_name = re.compile('[^A-Za-z0-9]').sub
            appname = unify_name('', self.binary)

            cmdline_filenames = glob.glob('/proc/[0-9]*/cmdline')

            for cmdline_filename in cmdline_filenames:
                try:
                    fd = open(cmdline_filename)
                    cmdline = fd.read()
                    fd.close()
                except IOError:
                    continue
                if unify_name('', cmdline).find(appname) != -1:
                    # Found one, kill it
                    pid = int(cmdline_filename.split('/')[2])
                    try:
                        os.kill(pid, signal)
                    except:
                        pass
        except OSError:
            pass

        log.info('kill -%d %s' % (signal, self.binary))
        if signal == 15:
            cb = notifier.Callback(self.__killall, 9)
            self.__kill_timer = notifier.timer_add(2000, cb)
        else:
            log.critical('PANIC %s' % self.binary)

        return False
Example no. 39
	def __killall( self, signal ):
		"""
		Internal killall helper function
		"""
		if not self.is_alive():
			self.__dead = True
			self.stopping = False
			return False
		# child needs some assistance with dying ...
		try:
			# kill all applications with the string <appname> in their
			# commandline. This implementation uses the /proc filesystem,
			# it is Linux-dependent.
			unify_name = re.compile( '[^A-Za-z0-9]' ).sub
			appname = unify_name( '', self.binary )

			cmdline_filenames = glob.glob( '/proc/[0-9]*/cmdline' )

			for cmdline_filename in cmdline_filenames:
				try:
					fd = open( cmdline_filename )
					cmdline = fd.read()
					fd.close()
				except IOError:
					continue
				if unify_name( '', cmdline ).find( appname ) != -1:
					# Found one, kill it
					pid = int( cmdline_filename.split( '/' )[ 2 ] )
					try:
						os.kill( pid, signal )
					except:
						pass
		except OSError:
			pass

		log.info( 'kill -%d %s' % ( signal, self.binary ) )
		if signal == 15:
			cb = Callback( self.__killall, 9 )
			self.__kill_timer = notifier.timer_add( 2000, cb )
		else:
			log.critical( 'PANIC %s' % self.binary )

		return False
Example no. 40
	def status(self):
		print("\n\n")
		log.information("Status of: " + self._name)
		if self._shield != 0:
			log.information("Shield: " + str(self._shield))
		else:
			log.warning("shield broken !")
		if self._energy < 10 and self._energy > 0:
			log.warning("energy low")
		elif self._energy == 0:
			log.critical("Energy Empty")

		log.information("Energy: " + str(self._energy))
		log.information("Strengh: " + str(self._strength))
		log.information("Life: " + str(self._life))
		log.information("Dodge: " + str(self._dodge))
		log.information("XP: " + str(self._xp))
		log.information("Credits: " + str(self._credits))
		log.information("Level: " + str(self._level))
Esempio n. 41
0
    def OnEventConnect(self, nErrCode):
        """
        Event raised when the connection state changes.

        :param nErrCode: error code - 0 means login succeeded, negative means failure (see the error-code reference)
        """
        print("OnEventConnect received")
        
        if nErrCode == 0:
            print("로그인 성공")
            # 계좌번호 저장
            self.account = self.get_login_info("ACCNO")
            log.info("계좌번호 : " + self.account)
            
            # request dynamic instrument information
            #self.get_dynamic_subject_info()
            self.get_dynamic_subject_code()

            # request initial data
            #self.request_min_info('CLH17', dbsubject.info['CLH17']['시간단위'], "")
            #self.request_min_info('GCG17', dbsubject.info['GCG17']['시간단위'], "")
            
            # log the participating instruments
            #log.info("참여 종목 : %s" % dbsubject.info.values())


        else:
            c_time = "%02d%02d" % (time.localtime().tm_hour, time.localtime().tm_min)

            # log the login failure and send the message matching the error code
            err_msg = "per-error-code message"
            log.critical(err_msg)

            if int(c_time) >= 800 or int(c_time) < 700:
                # send an email alert
                gmail.send_email('[URGENT ' + str(c_time) + '] Haedong bot stopped', 'error code')

                # TODO: add automatic restart logic
                pass

            self.quit()
Example no. 42
 def run(self):
     try:
         self.work()
     except Exception as exception:
         log.critical('Thread "%s": %s', self.name, exception)
         log.critical(sys.exc_info())
         log.critical('Thread "%s" is forced to terminate...', self.name)
Example no. 43
 def dispatch_reprocessing_events(self, items):
     log.info("Dispatching reprocessing events")
     events_to_execute = self.get_reprocessing_events(items)
     reprocessing_document = self.get_reprocessing_document(
         events_to_execute)
     if len(reprocessing_document["events"]) == 0:
         return
     if not process_memory.save_document("reprocessing",
                                         reprocessing_document):
         """TODO qual a melhor maneira de tratar esse caso de falha?
             talvez enviar para uma fila no rabbit?
         """
         log.critical(
             "cannot save reprocessing document on process memory")
         log.critical(reprocessing_document)
         log.critical("Reprocessing aborted!")
         event_manager.push({
             "name": "system.reprocessing.error",
             "payload": {
                 "message":
                 "Reprocessing was aborted due process memory saved reprocessing document",
                 "document": reprocessing_document
             }
         })
     else:
         for event in reprocessing_document["events"]:
             event_manager.push(event)
Example no. 44
    def getHtml(self, requestInfo):
        url = requestInfo['url']
        headers = requestInfo['headers']
        encoding = requestInfo['encoding']
        maxtimes = requestInfo['max_times']
        timeout = requestInfo['timeout']
        status = False
        times = 0
        res = ''  # fallback return value if every attempt fails
        # request each page at most maxtimes times
        while times < maxtimes and not status:
            try:
                # if a proxy is in use, attach it to the request
                if self.isProxy:
                    proxyIp = random.choice(proxyList)
                    proxies = {
                        "http": "http://" + str(proxyIp),
                        "https": "https://" + str(proxyIp)
                    }

                    res = requests.get(url,
                                       headers=headers,
                                       proxies=proxies,
                                       timeout=timeout)
                else:
                    res = requests.get(url, headers=headers, timeout=timeout)
                res = res.content.decode(encoding)
                if '' == res:
                    times = times + 1
                    continue
                times = times + 1
            except Exception as e:
                log.error('Error requesting %s: %s' % (url, e))
                times = times + 1
                continue
            status = True
        # exceeded the maximum request count; record the page as failed
        if times >= maxtimes:
            log.critical('%s page request failed' % url)

        return res
Example no. 45
 def run(self, instance_id):
     log.info("Running merge branch")
     event = self.get_event(instance_id)
     try:
         branch_name = event.get("payload", {}).get("branch")
         log.info(f"Merging {branch_name} into master")
         if not branch_name:
             raise Exception(
                 f"branch name should be passed! received:{branch_name}")
         log.info("Gettings branch links")
         links = self.branch_link.get_links_by_branch(branch_name)
         for link in links:
             _type = link.entity.lower()
             cls = globals()[_type]
             log.info(f"Flipping {_type}")
             self.flip_data(cls, self.session, link.branch_name)
         log.info("Closing branch on apicore")
         self.branch.set_merged(branch_name)
         log.info("Commiting changes to database")
         self.session.commit()
         log.info("Merge success")
         event["name"] = event["name"].replace(".request", ".done")
         event_manager.push(event)
     except Exception as ex:
         log.critical(ex)
         if event:
             event["name"] = event["name"].replace(".request", ".error")
             event["payload"]["message"] = str(ex)
             event_manager.push(event)
         else:
             event = {}
             event["name"] = "domain.merge.error"
             event["instanceId"] = instance_id
             event["payload"] = {}
             event["payload"]["instanceId"] = instance_id
             event["payload"]["message"] = str(ex)
             event_manager.push(event)
         raise ex
Example no. 46
 def make_feature_vector(msgs, cusips):
     'return (FeatureVector, unused msgs) or raise NoFeatures'
     if len(cusips) == 0:
         log_msg = (
             'A SetCusipOTRs message was not received before a StartOutput message was received'
         )
         log.critical(log_msg)
         assert False, log_msg
     all_features = features.FeatureVector()
     shortest_unused = msgs
     for cusip in cusips:
         for name, maker in (('trace_print', features.trace_print),):
             try:
                 fv, unused = maker(msgs, cusip)
             except exception.NoFeatures as e:
                 raise e
             if len(unused) < len(shortest_unused):
                 shortest_unused = copy.copy(unused)
             # rename the feature to use the name
             # that makes the features unique
             for k, v in fv.items():
                 key = (
                     'id_%s_%s_%s' % (name, cusip, k[3:]) if k.startswith('id_') else
                     '%s_%s_%s' % (name, cusip, k)
                     )
                 all_features[key] = v
     # create a unique identifier for the feature vector
     all_features['id_feature_vector'] = feature_vector_identifiers.get_next()
     # add id info from TracePrint for the first primary cusip
     for msg in msgs:
         if msg.cusip == cusips[0]:
             # found first trade for the primary cusip
             all_features['id_primary_cusip'] = msg.cusip
             all_features['id_primary_cusip_issuepriceid'] = msg.issuepriceid
             all_features['id_primary_cusip_oasspread'] = msg.oasspread
             all_features['id_primary_cusip_reclassified_trade_type'] = msg.reclassified_trade_type
             break
     return all_features, shortest_unused
Example no. 47
def get_all_files_with_extension(look_in_dir='',
                                 base_name='',
                                 extension='.sww',
                                 verbose=False):
    '''Find all files in a directory with a given stem name and extension.
    Finds all the files in "look_in_dir" whose names contain "base_name" and end in "extension".

    Returns: a list of strings

    Usage:     iterate_over = get_all_files_with_extension(dir, name)
    then
               for swwfile in iterate_over:
                   do stuff

    Check "export_grids" and "get_maximum_inundation_data" for examples
    '''

    # plus tests the extension
    name, ext = os.path.splitext(base_name)

    if ext != '' and ext != extension:
        msg = 'base_name %s must be a file with %s extension!' \
              % (base_name, extension)
        raise IOError(msg)

    if look_in_dir == "":
        look_in_dir = "."                               # Unix compatibility

    dir_ls = os.listdir(look_in_dir)
    iterate_over = [x[:-len(extension)] for x in dir_ls if name in x and x.endswith(extension)]

    if len(iterate_over) == 0:
        msg = 'No files of the base name %s in %s' % (name, look_in_dir)
        raise IOError(msg)

    if verbose: log.critical('iterate over %s' % iterate_over)

    return iterate_over
Example no. 48
	def __peek_protoc(self):
		if len(self.recv_buf) == 0:
			return None

		check_result, proto_class, err = protoc.check_recv(self.recv_buf)
		# wait for the incomplete package to finish arriving
		if err in [protoc.PKG_ERR_NOT_READY, protoc.PKG_ERR_TOO_SHORT]:
			log.debug('incomplete package, waiting for the rest')
			return None

		bad_pkg_bytes = 0
		while err != protoc.PKG_OK and len(self.recv_buf) > 0:
			bad_pkg_bytes += 1
			self.recv_buf = self.recv_buf[1:]
			check_result, proto_class, err = protoc.check_recv(self.recv_buf)

		if bad_pkg_bytes:
			log.critical('detected bad package data, ignored %d bytes, peer: %s' % (bad_pkg_bytes, self.peername))

		if err == protoc.PKG_OK:
			# log.debug('peek package:: %s, %s' % (str(check_result), str(proto_class)))
			return (check_result, proto_class)
		return None
Example no. 49
    def run(self):

        index = 0

        while True:
            try:
                # the proxy pool here was purchased online
                url = 'http://api.66daili.cn/API/GetCommonProxy/?orderid=2891740301801427631&num=1000&token=XXX&format=json&line_separator=win&protocol=http&anonymous=elite,anonymous,transparent&area_exclude=%E4%B8%AD%E5%9B%BD&proxytype=https&speed=fast,quick,slow#api'
                html = urllib.request.urlopen(url, timeout=60).read().decode('utf-8')
                if html is not None:
                    proxyList = json.loads(html)['proxies']
                    WebSpider.proxyList = proxyList
                    print('Stocked ' + str(len(proxyList)) + ' proxies, ' + self.pack)

                flag = gvalue.get_value('proxySwitch')
                print(flag)
                if flag is False:
                    print('Proxy switched off, stopping')
                    break

            except Exception as e:
                log.critical('Proxy generation failed: ' + str(e))
            time.sleep(self.interval)
Example no. 50
def main():
    args = get_args()

    # For the moment/testing: REMOVE
    from config_personal import aws_access_key_id, aws_secret_access_key
    args.access = aws_access_key_id
    args.secret = aws_secret_access_key
    # End REMOVE   

    if not args.access: args.access = input('Admin Access key: ')
    if not args.secret: args.secret = input('Admin Secret key: ')

    try:
        conn = boto.iam.connection.IAMConnection(args.access, args.secret)
        acc, sec = create_user(conn, args.user, args.group)
        set_permissions(conn, acc, sec)
    except:
        log.critical('Cannot connect to AWS to configure user settings.')
        exit(1)
        
    success = test_permissions(conn, acc, sec)
    if success:
        # Will need args.user, acc, sec
        save_config()
Example no. 51
def write(filename_or_path: Union[str, Path],
          text: str,
          *,
          display=False) -> None:
    """Write text to a given file and optionally log it."""
    if isinstance(filename_or_path, Path):
        path = filename_or_path
    else:
        path = Path(filename_or_path).resolve()
        text = dedent(text)

    message = f'Writing file: {path}'
    line = '=' * (31 + len(message))
    if text:
        content = text.replace(' \n', '␠\n')
    else:
        content = '∅\n'
    if display:
        log.debug(message + '\n' + line + '\n' + content + line)
    else:
        log.critical(message)

    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(text)
Esempio n. 52
0
def check_dir(path, verbose=None):
    """Check that specified path exists.
    If path does not exist it will be created if possible

    USAGE:
       checkdir(path, verbose):

    ARGUMENTS:
        path -- Directory
        verbose -- Flag verbose output (default: None)

    RETURN VALUE:
        Verified path including trailing separator
    """

    import os.path

    if sys.platform in ['nt', 'dos', 'win32', 'what else?']:
        unix = 0
    else:
        unix = 1

    # add terminal separator, if it's not already there
    if path[-1] != os.sep:
        path = path + os.sep

    # expand ~ or ~username in path
    path = os.path.expanduser(path)

    # create directory if required
    if not (os.access(path, os.R_OK | os.W_OK) or path == ''):
        try:
            os.mkdir(path)

            # Change access rights if possible
            if unix:
                exitcode = os.system('chmod 775 ' + path)
            else:
                pass  # FIXME: What about access rights under Windows?

            if verbose: log.critical('MESSAGE: Directory %s created.' % path)
        except:
            log.critical('WARNING: Directory %s could not be created.' % path)
            if unix:
                try:
                    path = os.environ['TMPDIR']
                except KeyError:
                    path = '/tmp/'
            else:
                path = 'C:' + os.sep

            log.critical("Using directory '%s' instead" % path)

    return path
Example no. 53
class CleanupDaemon(UnixDaemon):

    name = "inGraph-cleanup"

    def before_daemonize(self):
        log.info("Starting inGraph-cleanup daemon..")
        try:
            databse_config = file_config('ingraph-database.conf')
        except IOError, e:
            log.critical(e)
            sys.exit(1)
        log.debug("Connecting to the database..")
        try:
            self._conn = connect(database_config['dsn'])
        except KeyError:
            log.critical(
                "You need to set a database connection string (`dsn` setting) in your database configuration file."
            )
            sys.exit(1)
Example no. 54
    def mthStart(self):
        url = 'https://www.amazon.cn/gp/bestsellers'
        log.critical("亚马逊品类信息爬虫启动")
        level = 0
        urlInfo = {'url': url, 'level': level}
        self.urlQueue.put(urlInfo)
        thlist = list()
        for i in range(self.threadCount):
            th = SpiderThread(i, self.start)
            thlist.append(th)

        for i in range(self.threadCount):
            log.critical('Thread %s: starting (normal)', thlist[i].name)
            thlist[i].start()
            # guard against the queue being empty right after startup
            time.sleep(5)

        for i in range(self.threadCount):
            log.critical('Thread %s: join', thlist[i].name)
            thlist[i].join()
Example no. 55
    def read_memory(self):
        """Read device memory."""

        # compute the memory used by data log, round-up to the entire sector
        if self.rec_method == self.RCD_METHOD_OVF:
            # in OVERLAP mode we don't know where data ends, read it all
            log.info('read_memory: OVERLAP mode, read entire memory')
            bytes_to_read = flash_memory_size(self.model_id)
        else:
            # in STOP mode we read from zero to NextWriteAddress
            log.info('read_memory: STOP mode, read zero to next write position')
            log.debug('.next_write_address=%06x (%d), .SIZEOF_SECTOR=%06x (%d)'
                      % (self.next_write_address, self.next_write_address,
                         self.SIZEOF_SECTOR, self.SIZEOF_SECTOR))
            sectors = int(self.next_write_address / self.SIZEOF_SECTOR)
            if self.next_write_address % self.SIZEOF_SECTOR:
                sectors += 1
            bytes_to_read = sectors * self.SIZEOF_SECTOR

        log.info('Retrieving %d (0x%08x) bytes of log data from device' % (bytes_to_read, bytes_to_read))

        non_written_sector_found = False

        offset = 0
        data = ''

        while offset < bytes_to_read:
            # request the next CHUNK of data
            self.send('PMTK182,7,%08x,%08x' % (offset, self.SIZEOF_CHUNK))
            msg = self.recv('PMTK182,8', 10)
            if msg:
                (address, buff) = msg.split(',')[2:]
                data += buff

                if (offset % self.SIZEOF_SECTOR) == 0:
                    if msg[19:19+self.SIZEOF_SEPARATOR*2] == 'FF'*self.SIZEOF_SEPARATOR:
                        print('WARNING: Sector header at offset 0x%08X is non-written data' % offset)
                        log.debug('read_memory: Got sector of non-written data at 0x%06x, ending read' % offset)
                        break

                offset += self.SIZEOF_CHUNK

                # update user 'percent read' display
                percent = offset * 100.0 / bytes_to_read
                sys.stdout.write('\rSaved log data: %6.2f%%' % percent)
                sys.stdout.flush()

            self.recv('PMTK001,182,7,3', 10)

        print('')   # terminate user 'percent read' display

        log.debug('%d bytes read (expected %d), len(data)=%d' % (offset+self.SIZEOF_CHUNK, bytes_to_read, len(data)))
        self.memory = bytes.fromhex(data)
        log.debug('self.memory=%s' % data)

        with open('debug.bin', 'wb') as fd:
            fd.write(self.memory)
        with open('debug.asc', 'w') as fd:
            fd.write(data)
        log.critical("Dumped memory to files 'debug.asc' (%d bytes) and 'debug.bin' (%d bytes)"
                     % (len(data), len(self.memory)))
Example no. 56
def setup_conn(host,name,user,password):
    if name is not None:
        conn_string = "host='{}' dbname='{}' user='******' password='******'".format(host,name,user,password)
    else:
        conn_string = "host='{}' user='******' password='******'".format(host,user,password)
    conn = psycopg2.connect(conn_string)
    conn.autocommit = True
    log.info("Succesfully connected to DB {}/{} with user {}".format(host,str(name),user))
    return conn

#Set up analytics db connection
try:
    conn = setup_conn(HOST,NAME,USER,PASS)
except:
    log.critical("Unable to connect to the DB")


#QUERY BUILDERS
def match_analyses_sql(tags):
    formatted_tags = str(tags).replace("'", '"')
    q="""
        SELECT anly.analysis_id, anly.tags, anly.payload, anly.status
        FROM (
            SELECT at.analysis_id, count(*) AS cnt
            FROM (
                -- create table from analysis table with one tag per row
                SELECT json_array_elements(tags) AS tag, analysis_id FROM analyses
                ) at
            -- create table from input with one tag per row and join
            INNER JOIN (SELECT json_array_elements('{tags}') AS tag
Example no. 57
        QtWidgets.QMessageBox.critical(
            None, 'Log Create Error',
            f'Log file is not created successfully. Error is {e}')
        exit(-1)

    exit_code = 1
    try:
        from widgets.toast import Toast
        from mainWindow import MainWindow

        Toast.settings['iconsPath'] = fbs.get_resource(
            os.path.join('icons', 'toast'))
        mainWindow = MainWindow()
        mainWindowQss = fbs.qss('mainWindow.qss')
        if mainWindowQss is not None:
            mainWindow.setStyleSheet(mainWindowQss)
        else:
            log.warning('Main window qss is not loaded successfully')

        Toast.setWidget(mainWindow)

        mainWindow.show()
        exit_code = fbs.app.exec_()  # 2. Invoke appctxt.app.exec_()
        sys.exit(exit_code)
    except Exception as e:
        traceback.print_exc()
        log.critical("Unexpected Error, " + str(e))
        QtWidgets.QMessageBox.critical(None, "Unexpected Error", str(e))
    finally:
        sys.exit(exit_code)
Example no. 58
    if exportFile != "" and exportFileType != "":
        export.parseResults(exportFileType, exportFile, heuristicsCommands, xmlControl, VERBOSE, TIMERESOURCES)    
        
    # clean up environment
    environmentCleanup()
    
# start program execution

if __name__ == '__main__':
    try:
        import sys, time, log
       
        #creating the default log
        log.setLog("infoLog")        
    except ImportError as e:
        log.critical("failed initial module import.\n" + str(e))

    # check arguments
    # no need to continue with module loading if incorrect options are passed
    checkSystemArguments()
    args = xmlConfigFile    
    
    if VERBOSE:
        log.info("(0): apkg initialization")
        log.info(" 1:   loading modules")
        if TIMERESOURCES:
            log.info("       task started on "+ str(time.clock()))
                    
    try:
        # general operations
        import os, commands
Example no. 59
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    try:
        opts, args = getopt.getopt(argv, 'b:d:g:hp:s:v',
                                   ['bin=', 'dump=', 'debug=', 'erase', 'full=',
                                    'gpx=', 'help', 'log=', 'port=', 'speed=',
                                    'tracks=', 'version', 'waypoints='])
    except getopt.error as msg:
        usage(str(msg))
        return 1

    if len(args) != 0:
        usage()
        return 1

    # get debug level, set up logger
    debug_level = DefaultDebugLevel
    for (opt, param) in opts:
        if opt in ['--debug']:
            try:
                debug_level = int(param)
            except ValueError:
                # not int, could be symbolic
                param = param.upper()
                if param in DebugSymbolLevel:
                    debug_level = DebugSymbolLevel[param]
                else:
                    debug_level = -1
                if debug_level < MinDebugLevel or debug_level > MaxDebugLevel:
                    usage("Option '%s' requires integer or symbolic level" % opt)
                    return 1
    global log
    log = log.Log('mtkbabel.log', debug_level)
    if debug_level != DefaultDebugLevel:
        log.critical('Debug level set to %d' % debug_level)
    log.critical('main: argv=%s' % str(argv))

    # set default values
    port = None
    ports = get_tty_port()
    if len(ports) == 1:
        port = ports[0]
    speed = DefaultPortSpeed
    log.debug('port=%s, speed=%s' % (str(port), str(speed)))

    # pick out help, device, speed and version options
    for (opt, param) in opts:
        if opt in ['-h', '--help']:
            usage()
            return 0
        if opt in ['-p', '--port']:
            port = param
            log.info('Set port to %s' % port)
        if opt in ['-s', '--speed']:
            try:
                speed = int(param)
            except ValueError:
                usage("Option '%s' requires integer speed" % opt)
            if speed < MinPortSpeed or speed > MaxPortSpeed:
                error('Speed error, allowable range is (%d, %d)' % (MinPortSpeed, MaxPortSpeed))
            log.info('Set port speed to %d' % speed)
        if opt in ['-v', '--version']:
            print(Version)
            return 0

    # create QStarz object, if possible
    if port is None:
        port = find_device(speed)
        if port is None:
            log.critical('No port specified & none found, choices: %s' % ', '.join(ports))
            print('No port specified & none found, choices: %s' % ', '.join(ports))
            return 1
    gps = QStarz(port, speed)
    if not gps.init():
        log.debug('Device is %s, speed %d is not a QStarz device' % (str(port), speed))
        return 1
    log.debug('Device is %s, speed %d' % (str(port), speed))

    # now handle remaining options
    for (opt, param) in opts:
        if opt in ['-b', '--bin']:
            with open(param, 'rb') as fd:
                memory = fd.read()
            gps.set_memory(memory)
        if opt in ['-d', '--dump']:
            log.debug('Dumping memory to file %s' % param)
            memory = gps.get_memory()
            log.info('Read %d bytes' % len(memory))
            with open(param, 'wb') as fd:
                fd.write(memory)
            log.info('Wrote %d bytes to file %s' % (len(memory), param))
            return 0
        if opt in ['--erase']:
            not_yet_implemented('erase memory')
            return 0
        if opt in ['--full']:
            not_yet_implemented('memory full handling')
            return 0
        if opt in ['-g', '--gpx']:
            log.debug('Got --gpx option')
            data = gps.get_memory()
            parse_log_data(data)
            #self.write_gpx(param)
        if opt in ['--log']:
            not_yet_implemented('set logging criteria')
            return 0
        if opt in ['--tracks']:
            not_yet_implemented('write tracks GPX')
            return 0
        if opt in ['--waypoints']:
            not_yet_implemented('write waypoints GPX')
            return 0