def parse(self, parser):
    # The first token is the token that started the tag.
    # Get the line number so that we can give
    # that line number to the nodes we create by hand.
    lineno = parser.stream.next().lineno

    # Parse the passed template name.
    # `template` is a jinja2.nodes.Const after this.
    try:
        template = parser.parse_expression()
    except jinja2.exceptions.TemplateSyntaxError:
        parser.fail('Missing argument to %s' % self._TAG_NAME, lineno)

    # Target variable value will be the name up to the first dot,
    # but after any slashes in the path. Like Go.
    try:
        template_name = template.value
    except AttributeError:
        parser.fail('Argument to %s must be a string' % self._TAG_NAME)

    basename = os.path.basename(template_name)
    import_as = basename.split('.')[0]

    # Key uniqueness on the import_as name.
    already_included = import_as in self.environment._loaded_files
    self.environment._loaded_files.add(import_as)

    # Build the nodes.
    # Import(template, target, with_context)
    import_node = jinja2.nodes.Import(template, import_as, True).set_lineno(lineno)
    # Include(template, with_context, ignore_missing)
    include_node = jinja2.nodes.Include(template, True, False).set_lineno(lineno)

    logger("Including template %s using LOAD at line %d" % (import_as, lineno))

    if already_included:
        return import_node
    else:
        return [import_node, include_node]
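# Usage sketch for the LOAD tag parsed above (illustrative only; the extension
# class name, tag name, and environment setup are assumptions, not taken from
# the original project). The tag imports a template under its basename, Go
# style, and includes it at most once per environment:
#
#   env = jinja2.Environment(extensions=[LoadExtension])
#   env._loaded_files = set()
#   env.from_string('{% load "partials/forms.html" %}').render()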
def submit(self, name, cmd):
    command = ["bsub"]
    command.extend(["-J", name])
    if self._queue:
        command.extend(["-q", str(self._queue)])
    if self._log_dir:
        stderr = os.path.join(self._log_dir, "lsf-%s.err" % name)
        stdout = os.path.join(self._log_dir, "lsf-%s.out" % name)
        command.extend(["-o", stdout])
        command.extend(["-e", stderr])
    command.append(cmd)

    logger().debug("Submitting job: name=%s command=\n %s\n" % (name, command))
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = "".join([l for l in proc.stdout])
    err = "".join([l for l in proc.stderr])

    expr = 'Job <(?P<job_id>.+)> is submitted.*'
    match = re.search(expr, out)
    if proc.wait() != 0 or not match:
        raise TaskSubmitError("%s\n"
                              "Executed command:\n%s\n%s\n" % (
                                  out, err, " ".join(command)))
    return match.group('job_id')
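# For reference, a successful bsub typically prints a line like the one below
# (illustrative example, not captured from the original project); this is what
# the 'Job <(?P<job_id>.+)> is submitted.*' pattern extracts the job id from:
#
#   Job <123456> is submitted to queue <normal>.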
def run_playbook(dir, extra_vars):
    utils.VERBOSITY = 0
    playbook_cb = MyPlaybookCallbacks(verbose=utils.VERBOSITY)
    stats = callbacks.AggregateStats()
    runner_cb = MyPlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)

    inventory = """
    [localhost]
    localhost ansible_connection=local
    """

    # Create a temporary file and write the inventory string to it
    hosts = tempfile.NamedTemporaryFile(delete=False, dir=dir)
    hosts.write(inventory)
    hosts.close()

    pb = PlayBook(
        playbook=os.path.join(dir, "playbook.yml"),
        host_list=hosts.name,
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        extra_vars=extra_vars,
        stats=stats
    )
    results = pb.run()

    # Ensure the on_stats callback is called for callback modules
    playbook_cb.on_stats(pb.stats)
    logger().info(results)
def carpeta_edit(self):
    """Copy the edit folder."""
    logger("Copying Edit")
    a = os.path.join(self.dir_treball, self.c.dir_fotos)
    b = os.path.join(self.c.dir_fotografies, self.c.edit)
    self.copiar(a, b)
    shutil.rmtree(a)
def carpeta_original(self):
    """Copy the original folder."""
    logger("Copying Original")
    a = os.path.join(self.dir_treball, self.c.dir_original)
    b = os.path.join(self.c.dir_fotografies, self.c.original)
    self.copiar(a, b)
    shutil.rmtree(a)
def exceptions(self):
    try:
        yield
    except:
        (exc_type, value, tb) = sys.exc_info()
        e = "".join(traceback.format_exception(exc_type, value, tb))
        logger().error("exception in executor: \n %s" % e)
def carpeta_web(self):
    """Process the images."""
    logger("Processing")
    l = self.llista_imatges()
    for i, image in enumerate(l):
        self.barra(image, l)
        self.thumbs(i, image)
    print
def wait_for_results(jobs):
    logger().debug("waiting for jobs to finish: %s" % jobs)
    while len(jobs) > 0:
        name = executor.results_queue.get()
        if name.startswith("STOP"):
            return False
        jobs.remove(name)
    return True
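# wait_for_results() assumes a producer on the other side of
# executor.results_queue: every finished job puts its own name on the queue,
# and a failing worker puts a name starting with "STOP" to abort the wait.
# A minimal sketch of that contract (hypothetical, inferred from the consumer
# above, not from the original source):
#
#   def on_job_finished(name, ok):
#       executor.results_queue.put(name if ok else "STOP:" + name)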
def header_1(lfile):
    logger(lfile)
    print ""
    print "==============================LOG FILES=============================="
    print " Log file for the tests run : %s" % lfile
    print " Log file for ganesha.nfsd : /root/nfs-ganesha.log on the ganesha-host"
    print " Log file for pynfs test : /tmp/pynfs-results.txt"
    print "====================================================================="
def _read_samples(tsv_path):
    if tsv_path is None:
        return []
    logger().debug("TSV config path: %s" % tsv_path)
    tsv_conf = tsv_config.parse(tsv_path)
    logger().info("TSV config first line: %s" % str(tsv_conf.row_at(0)))
    return list(tsv_conf.all_rows())
def recurse_refs(root, path):
    if isinstance(root, dict):
        for k, v in root.items():
            recurse_refs(v, path + "->%s" % k)
    elif isinstance(root, list):
        for n, i in enumerate(root):
            recurse_refs(i, path + "[%d]" % n)
    logger().info("%s %d (%s)" % (path, sys.getrefcount(root), root.__class__))
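# Illustrative call (assumed, not from the original source); the logged
# reference counts depend entirely on interpreter state. Children are logged
# before their containers because the log call runs after the recursion:
#
#   recurse_refs({"jobs": [job_a, job_b]}, "root")
#   # logs lines like:
#   #   root->jobs[0] 5 (<class 'Job'>)
#   #   root->jobs[1] 4 (<class 'Job'>)
#   #   root->jobs 3 (<class 'list'>)
#   #   root 2 (<class 'dict'>)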
def processar(self):
    """Process the images."""
    logger("Processing")
    llista_im = self.llista_imatges()
    for image in llista_im:
        self.barra(image, llista_im)
        i = os.path.join(self.dir_treball, self.c.dir_fotos, image)
        gs = GimpScript(i)
        gs.run()
    print
def main(params):
    (system_ip, username, pwd, debug_enabled, thread_lock) = params
    logger(debug_enabled)
    logfile = logging.getLogger("logmain")
    logfile.info("*****************START LOG***************************")
    run_test = smoketest(system_ip, username, pwd, debug_enabled, thread_lock)
    if run_test.test_move_files() == False:
        return False
    else:
        return True
def parse(playbook_path, tsv_path, var_files, log_dir):
    plays = _read_plays(playbook_path)
    logger().info("Number of plays: %s" % len(plays))
    samples = _read_samples(tsv_path)
    logger().info("Number of samples: %s" % len(samples))
    vars = _read_variables(var_files)
    vars["all_samples"] = samples
    return PlayBookYaml(JobCommand(playbook_path, tsv_path, var_files, log_dir),
                        plays, samples, vars)
def extract(self):
    """Extract kml from kmz."""
    logger('Extract kml from kmz')
    kmz = ZipFile(self.abs_kmz_file, 'r')
    kml = None
    for l in kmz.namelist():
        site_match = re.compile(r'\.kml').search(l)
        if site_match:
            kml = l
            break
    kmz.extract(kml, self.folder)
    os.rename("%s/%s" % (self.folder, kml), self.abs_kml_file)
def main(argv):
    parser = argparse.ArgumentParser(description="NGSpeasy pipelines")
    parser.add_argument("playbook_path", metavar='/path/to/your_pipeline.yml',
                        type=cmdargs.existed_file)
    parser.add_argument("--play_index", dest="play_index", type=int,
                        help="play index", required=True)
    parser.add_argument("--run_index", dest="run_index", type=int, default=-1,
                        help="run index")
    parser.add_argument("--version", action="version", version="%(prog)s 3.0",
                        help="print software version")
    parser.add_argument("--samples", metavar="/path/to/config.tsv", dest="samples_tsv",
                        type=cmdargs.existed_file, help="List of samples in TSV format")
    parser.add_argument("--vars", dest="var_files", metavar="/path/to/your/vars.yml",
                        help="additional variables", type=cmdargs.existed_file,
                        action="append")
    parser.add_argument("--log_dir", dest="log_dir", type=cmdargs.existed_directory)
    args = parser.parse_args(argv)

    play_index = args.play_index
    run_index = args.run_index
    playbook_path = os.path.abspath(args.playbook_path)
    samples_tsv = os.path.abspath(args.samples_tsv) if args.samples_tsv else None
    var_files = [os.path.abspath(f) for f in args.var_files]

    logger().debug("Command line arguments: %s" % args)
    logger().debug("TSV config path: %s" % samples_tsv)

    pb = playbook_yaml.parse(playbook_path, samples_tsv, var_files, args.log_dir)
    (play_run_vars, play_run_yaml, play_run_name) = pb.play_run(play_index, run_index)

    if args.log_dir is not None:
        init_play_run_logger(args.log_dir, str(play_index) + "_" + play_run_name)

    task = play_run_yaml
    task["hosts"] = "all"

    temp_dir = tempfile.mkdtemp()
    print temp_dir
    try:
        roles_dir = os.path.join(os.path.dirname(playbook_path), "roles")
        if os.path.exists(roles_dir):
            shutil.copytree(roles_dir, os.path.join(temp_dir, "roles"))
        library_dir = os.path.join(os.path.dirname(playbook_path), "library")
        if os.path.exists(library_dir):
            shutil.copytree(library_dir, os.path.join(temp_dir, "library"))

        playbook = os.path.join(temp_dir, "playbook.yml")
        with open(playbook, 'w') as outfile:
            outfile.write(yaml.dump([task], default_flow_style=False))

        run_playbook(temp_dir, play_run_vars)
    finally:
        shutil.rmtree(temp_dir)
def delete(filename, loglevel):
    from logger import logger
    if loglevel:
        logger().displayMessage(' Deleting {}... '.format(filename), False)
    ret = None
    try:
        os.remove(filename)
    except:
        print('Failed')
        ret = False
    else:
        print('Done')
        ret = True
    return ret
def _call(self, grammar, args=None):
    args = args or []
    pb_call = [self.path] + args
    logger("Calling PB with cmd: %s" % ' '.join(pb_call))
    pb = subprocess.Popen(
        pb_call,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = pb.communicate(grammar)
    logger("PB finished, return code: %d" % pb.returncode)
    return stdout, stderr, pb.returncode
def POE(logdir, target, logging, debug):
    if (logging == True):
        LOG = logger()
        newlogentry = ''

    dig_output_data = ''
    output = logdir + 'Dig.txt'

    FI = fileio()

    print '\r\n[*] Running Dig against: ' + target.target
    subproc = subprocess.Popen('dig -t NS ' + target.target, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for dig_data in subproc.stdout.readlines():
        if (debug == True):
            print '[DEBUG]: ' + dig_data
        dig_output_data += dig_data

    try:
        FI.WriteLogFile(output, dig_output_data)
        print colored('[*] Dig data has been written to file here: ', 'green') + colored(output, 'blue', attrs=['bold'])
        if (logging == True):
            newlogentry = 'Dig data has been generated to file here: <a href=\"' + output + '\"> Dig Output </a>'
            LOG.WriteLog(logdir, target.target, newlogentry)
    except:
        print colored('[x] Unable to write Dig data to file', 'red', attrs=['bold'])
        if (logging == True):
            newlogentry = 'Unable to write Dig data to file'
            LOG.WriteLog(logdir, target.target, newlogentry)
        return -1

    return 0
def bruteforce(self, mylist):
    for i in mylist:
        if self.passwordexits == "True":
            break
        if i.split(":")[0] == self.password:
            self.passwordexits = "True"
            self.lastone = "Now"
            self.passinfo = "Password : %s" % i.split(":")[1]
            self.timeinfo = "Finished %d lines in %s seconds .." % (
                self.cacheinfo, str((datetime.now() - self.startime))[0:7])
            banner("print")
            print self.timeinfo
            break
    del mylist
    if self.lastone == "Now":
        self.timeinfo = "Finished %d lines in %s seconds .." % (
            self.cacheinfo, str((datetime.now() - self.startime))[0:7])
    logger(self.passwordexits, self.timeinfo, self.passinfo)
def __init__(self, url, bucket, password="", verbose=False):
    self.log = logger.logger("VBucketAwareMemcachedClient")
    self.bucket = bucket
    self.rest_username = bucket
    self.rest_password = password
    self._memcacheds = {}
    self._vBucketMap = {}
    self._vBucketMap_lock = Lock()
    self._vBucketMapFastForward = {}
    self._vBucketMapFastForward_lock = Lock()
    # TODO: use regular expressions to parse the url
    server = {}
    if not bucket:
        raise InvalidArgumentException("bucket can not be an empty string",
                                       parameters="bucket")
    if not url:
        raise InvalidArgumentException("url can not be an empty string",
                                       parameters="url")
    if url.find("http://") != -1 and url.rfind(":") != -1 and url.find("/pools/default") != -1:
        server["ip"] = url[url.find("http://") + len("http://"):url.rfind(":")]
        server["port"] = url[url.rfind(":") + 1:url.find("/pools/default")]
        server["username"] = self.rest_username
        server["password"] = self.rest_password
    self.servers = [server]
    self.servers_lock = Lock()

    self.rest = RestConnection(server)
    self.reconfig_vbucket_map()
    self.init_vbucket_connections()

    self.dispatcher = CommandDispatcher(self)
    self.dispatcher_thread = Thread(name="dispatcher-thread", target=self._start_dispatcher)
    self.dispatcher_thread.daemon = True
    self.dispatcher_thread.start()

    self.streaming_thread = Thread(name="streaming", target=self._start_streaming, args=())
    self.streaming_thread.daemon = True
    self.streaming_thread.start()
    self.verbose = verbose
def __init__(self):
    Pyro.config.PYRO_NS_HOSTNAME = '134.126.132.72'  # djs
    # Pyro.config.PYRO_NS_HOSTNAME = 'hatfull12.bio.pitt.edu'
    Subscriber.__init__(self)
    self.subscribe("clustalw")
    self._logger = logger.logger(sys.argv[2])
    # Pyro.config.PYRO_NS_HOSTNAME = 'djs-bio.bio.pitt.edu'
    self.serverSelector = Pyro.core.getProxyForURI("PYRONAME://serverSelector")
    print 'selector:', self.serverSelector
    self._logger.log('got serverSelector')
    self.client = socket.gethostname()
    if self.client in ('phage', 'gene', 'pham'):
        self.client = self.client + '_hostname'
    self._logger.log('platform: ' + sys.platform)
    self._logger.log('hostname: ' + self.client)
    self.server = self.serverSelector.get_server(sys.platform, self.client)
    self._logger.log('using server ' + self.server)
    self.phamServer = Pyro.core.getProxyForURI("PYRONAME://" + self.server)
    if sys.platform == 'win32':
        # this will just use the current working directory
        self.rootdir = ''
    else:
        self.rootdir = '/tmp'
    if len(sys.argv) < 2:
        self._logger.log('Number of sequences to align not specified. Using default (1000)')
        self.numSeqs = 1000
    elif int(sys.argv[1]) < 0 or int(sys.argv[1]) > 10000:
        self._logger.log('requested number of sequences (%s) is outside allowable range (1-10000). Using default (1000)' % sys.argv[1])
        self.numSeqs = 1000
    else:
        self.numSeqs = int(sys.argv[1])
    self.align()
def __init__(self, url, save_dir='tmp'):
    """
    @url: full url of a site
    @save_dir: dir to save site
    """
    # log
    self.logger = logger('file', 'sitelog.log', save_dir)
    self.logger.info('-' * 20)
    self.logger.info('start')
    self.logger.info('start func: __init__')
    self.logger.info('url: %s' % url)

    save_time = datetime.strftime(datetime.now(), '%Y%m%d%H%M')
    self.save_time = save_time
    self.save_dir = os.path.abspath(os.path.join(save_dir, save_time))
    # create dir if it does not exist
    if not os.path.isdir(self.save_dir):
        os.makedirs(self.save_dir)

    self.url = url
    u = URL(url)
    # get host like: http://m.sohu.com
    self.host = u.scheme() + '://' + u.host()
    print '%s: saving %s' % (save_time, self.url)
    self.logger.info('end func: __init__')
def __init__(self, username, password):
    self.username = username
    self.password = password
    # Pyro.config.PYRO_NS_HOSTNAME = '136.142.141.113'  # djs
    if opts['nsname']:
        Pyro.config.PYRO_NS_HOSTNAME = opts['nsname']
    else:
        Pyro.config.PYRO_NS_HOSTNAME = 'localhost'
    Subscriber.__init__(self)
    self.subscribe("clustalw")
    self._logger = logger.logger(sys.argv[2])
    # Pyro.config.PYRO_NS_HOSTNAME = 'djs-bio.bio.pitt.edu'
    self.serverSelector = Pyro.core.getProxyForURI("PYRONAME://serverSelector")
    print 'selector:', self.serverSelector
    self._logger.log('got serverSelector')
    self.client = socket.gethostname()
    self._logger.log('platform: ' + sys.platform)
    self._logger.log('hostname: ' + self.client)
    self.server = self.serverSelector.get_server(sys.platform, self.client)
    self._logger.log('using server ' + self.server)
    self.phamServer = Pyro.core.getProxyForURI("PYRONAME://" + self.server)
    if sys.platform == 'win32':
        # this will just use the current working directory
        self.rootdir = ''
    else:
        self.rootdir = '/tmp'
    self.align()
def __init__(self, name, id, year):
    '''constructor
    name: name of player
    id: player id
    year: year to get
    '''
    self.name = str(name).replace(' ', '')
    self.id = str(id).replace(' ', '')
    self.year = str(year)
    folderName = '{}_{}'.format(self.name, self.year)
    logName = 'Downloader_{}.log'.format(folderName)
    self.log = logger(logName, ovrw=True)
    if folderName not in os.listdir():
        try:
            # os.makedirs, not os.path.makedirs, creates the directory
            os.makedirs(folderName)
        except:
            self.log('Problem creating {}'.format(folderName), True, -1)
        else:
            self.log('Folder created {}'.format(folderName))
    self.saveDir = os.path.join(os.getcwd(), folderName)
    self.urlBase = 'http://gd2.mlb.com/components/game/mlb/year_{}/'.format(year)
    yearSoup = BeautifulSoup(request.urlopen(self.urlBase), 'lxml')
    months = []
    for a in yearSoup.find_all('a'):
        if 'month_' in a.get('href'):
            link = '{}/{}'.format(self.urlBase, a.get('href'))
            months.append(link)
    print(months)
def call(self):
    child = pexpect.spawn('telnet %s' % (self.hostname))
    log_checkConfiguration = logger("Result_All_Calling.log", 'w')
    child.logfile = log_checkConfiguration.logFile()
    index = child.expect(['>', pexpect.EOF, pexpect.TIMEOUT], timeout=15)
    if index == 0:
        child.sendline(self.username + " " + self.password)
        index = child.expect(['CDMG S3MTXV19DN MS2K Sanity', pexpect.EOF, pexpect.TIMEOUT], timeout=15)
        if index == 0:
            print "***Calling*** [3/5]"
            child.sendline('mtxcrank cdr 10 10 10')
            index = child.expect('second')
            if index == 0:
                print "\033[1;32;40m***Calling Success*** [OK]\033[0m"
            elif index == 1:
                print "EOF when expecting second"
                print "***Error Occurred when calling***"
            else:
                print "Timeout when expecting second"
                print "***Error Occurred when calling***"
            child.logfile.close()
        elif index == 1:
            print "EOF when expecting CDMG S3MTXV19DN MS2K Sanity"
        else:
            print "Timeout when expecting CDMG S3MTXV19DN MS2K Sanity"
    elif index == 1:
        print "EOF when expecting login and password"
    else:
        print "Timeout when expecting login and password"
def POE(logdir, target, logging, debug):
    if (logging == True):
        LOG = logger()
        newlogentry = ''

    strings_dump = ''
    strings_output_data = ''
    output = logdir + 'PDFParse.txt'

    FI = fileio()

    if (logging == True):
        newlogentry = 'Running pdf-parse against: <strong>' + target.filename + '</strong>'
        LOG.WriteLog(logdir, target.filename, newlogentry)

    subproc = subprocess.Popen('/opt/static/pdf-parser.py -c ' + target.filename, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for strings_data in subproc.stdout.readlines():
        strings_output_data += strings_data
        if (debug == True):
            print strings_data

    try:
        FI.WriteLogFile(output, strings_output_data)
        print '[*] PDF Parse data has been written to file here: ' + output
        if (logging == True):
            newlogentry = 'PDF Parse data has been generated to file here: <a href=\"' + output + '\"> PDF Parse Output </a>'
            LOG.WriteLog(logdir, target.filename, newlogentry)
    except:
        print '[x] Unable to write PDF Parse data to file'
        if (logging == True):
            newlogentry = 'Unable to write PDF Parse data to file'
            LOG.WriteLog(logdir, target.filename, newlogentry)
        return -1

    return 0
def __init__(self, motd="Message of the Day"):
    self.__log = logger('shellUI')
    try:
        shellUIinput.__init__(self, motd)
        self.__uiRow = self.getRow()
    except Exception as err:
        self.__log.write("__init__(): error:" + str(err))
def main():
    argNum = 7
    if len(sys.argv) < argNum:
        log = logger('Downloader Error')
        log('Expected format: -n First Last -i ID -y year')
        log('Got {} argument(s), expected {}'.format(len(sys.argv), argNum), True, -1)
    flags = ('-n', '-i', '-y')
    name = ''
    for i, arg in enumerate(sys.argv):
        print(arg)
        if arg is sys.argv[0]:
            continue
        if arg == '-n':
            j = i + 1
            # bounds check prevents an IndexError when -n is the last flag group
            while j < len(sys.argv) and sys.argv[j] not in flags:
                name += sys.argv[j]
                j += 1
        elif arg == '-i':
            idNum = sys.argv[i + 1]
        elif arg == '-y':
            year = sys.argv[i + 1]
    inp = (name, idNum, year)
    down = downloader(*inp)
def run():
    pylog = logger.logger(LOGFILE)
    while True:
        if interval_hit():
            pylog.set_value(get_foreground_window_title())
        time.sleep(0.2)
import logger
import sys

plug = __import__(sys.argv[1])
plug2 = __import__(sys.argv[2])
log = logger.logger(sys.argv[1], 0)
log2 = logger.logger(sys.argv[2], 0)

solSettings = []
j = 0
solSettings.append({
    'settings': {'length': 100, 'k': 5},
    'repr': 'bitString',
    'weight': 1,
    'prob': 'dTrap'
})
solSettings.append({
    'settings': {'length': 105, 'k': 7},
    'repr': 'bitString',
    'weight': 1,
    'prob': 'dTrap'
})
solSettings.append({
    'settings': {
        'length': 200,
def processsentences(sents, index: int):
    global sentencerepository, vectorrepositoryidx, featurerepository, \
        vectorrepositoryseq, vectorrepositorycxg, vectorrepositorysem
    for s in sents:
        index += 1
        key = "s" + str(index)
        if s in sentencerepository.values():
            continue
        fs = featurise(s)
        logger(s, debug)
        fcxg = fs["features"]
        fpos = fs["pos"]
        fsem = fs["roles"]
        fwds = fs["words"]
        logger(fwds, debug)
        logger(fpos, debug)
        logger(fcxg, debug)
        logger(fsem, debug)
        vecidx = tokenvector(fwds, None, True, debug)
        vecseq = seq.sequencevector(fpos, None, debug)
        vecis = sparsevectors.sparseadd(vecidx, vecseq, 1, True)
        logger("idx - comb\t" + str(sparsevectors.sparsecosine(vecidx, vecis)), debug)
        logger("seq - comb\t" + str(sparsevectors.sparsecosine(vecseq, vecis)), debug)
        veccxg = tokenvector(fcxg, vecis, False, debug)
        logger("comb - cxg\t" + str(sparsevectors.sparsecosine(vecis, veccxg)), debug)
        logger("idx - cxg\t" + str(sparsevectors.sparsecosine(vecidx, veccxg)), debug)
        logger("seq - cxg\t" + str(sparsevectors.sparsecosine(veccxg, vecseq)), debug)
        vecsem = rolevector(fsem, veccxg, debug)
        logger("idx - sem\t" + str(sparsevectors.sparsecosine(vecidx, vecsem)), debug)
        logger("seq - sem\t" + str(sparsevectors.sparsecosine(vecseq, vecsem)), debug)
        logger("comb - sem\t" + str(sparsevectors.sparsecosine(vecis, vecsem)), debug)
        logger("cxg - sem\t" + str(sparsevectors.sparsecosine(veccxg, vecsem)), debug)
        sentencerepository[key] = s
        vectorrepositoryidx[key] = vecidx
        vectorrepositoryseq[key] = vecseq
        vectorrepositorycxg[key] = veccxg
        vectorrepositorysem[key] = vecsem
        featurerepository[key] = fs
        logger(str(key) + ":" + str(s) + "->" + str(fs), debug)
        if index % 1000 == 0:
            logger(str(index) + " sentences processed", monitor)
from pprint import pprint
from ib_insync import IB, util
from ib_insync.ibcontroller import IBC, Watchdog
from eventkit import Event
from trader import Manager, VolumeStreamer
from params import contracts
from logger import logger

log = logger(__file__[:-3])


class WatchdogHandlers:

    def __init__(self, dog):
        dog.startingEvent += self.onStartingEvent
        dog.startedEvent += self.onStartedEvent
        dog.stoppingEvent += self.onStoppingEvent
        dog.stoppedEvent += self.onStoppedEvent
        dog.softTimeoutEvent += self.onSoftTimeoutEvent
        dog.hardTimeoutEvent += self.onHardTimeoutEvent
        self.dog = dog

    @staticmethod
    def onStartingEvent(*args):
        log.debug(f'StartingEvent {args}')

    @staticmethod
    def onStartedEvent(*args):
def __init__(self, modelLayers=[], modelParameters={}):
    self.parameters = updateDictionary(modelParameters, DEFAULT_MODEL_PARAMETERS)
    self.setupModel(modelLayers)
    self.compileModel()
    self.logger = logger()
class core():
    BLOG_FEED_URL = 'http://tweeria.wordpress.com/feed/'
    MAX_TIME_TO_SLEEP = 604800
    FINISHED_EVENTS_LIFETIME = 1209600
    loaded_data = {}
    log = logger.logger('/logs/parser.log')

    __version__ = u"2.3.2b"

    APP_DIR = os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]) + '/../'
    TEMPLATES_FOLDER = './templates/'
    MOBILE_TEMPLATES_FOLDER = './templates/mobile/'
    DELETED_ARTWORK_IMG = './data/artwork_delete.jpg'
    TMP_IMG_PATH = './templates/data/buffer/'
    PATH_FOR_UPLOADED_IMAGES = APP_DIR + 'templates'
    RESIZED_IMG_PATH = './data/resized_image/'
    GUILDS_AVATAR_FOLDER = './data/resized_image/guilds_avatars/'
    GUILD_DEF_AVATAR = './data/guild_default.jpg'

    MAX_UPLOAD_FILE_SIZE = 256000  # 250kb
    MAX_AVA_WIDTH = 200
    MAX_AVA_HEIGHT = 200
    MAX_ARTWORK_UPLOAD_FILE_SIZE = 1598464  # 1.5Mb
    MAX_ARTWORK_WIDTH = 600
    MAX_ARTWORK_HEIGHT = 1000
    THUMB_ARTWORK_WIDTH = 240
    THUMB_ARTWORK_HEIGHT = 400
    MAX_ITEM_SPELL_WIDTH = 100
    MAX_ITEM_SPELL_HEIGHT = 100
    THUMB_ITEM_SPELL_WIDTH = 56
    THUMB_ITEM_SPELL_HEIGHT = 56

    IMAGE_BUFFER_FOLDER = '/data/buffer/'
    IMAGE_GEN_FOLDER = './data/generated/'
    ARTWORK_PATH = '/data/characters/'
    ARTWORK_SHOP_PATH = ARTWORK_PATH
    IMAGE_ACHV_FOLDER = './data/achvs/'
    IMAGE_SPELL_FOLDER = './data/spells/'
    LAST_LOGIN_TIMEOUT = 3600
    IMAGE_GEN_SOURCE_FOLDER = './data/to_generator/'
    IMAGE_RACE = {'Human': 'human', 'Orc': 'orc', 'Troll': 'troll',
                  'Elf': 'elf', 'Undead': 'undead', 'Dwarf': 'dwarf'}

    CACHE_TIME = 30
    TMP_CACHE_TIME = 1

    md5_pass_phrase = 'none'
    md5_invite_phrase = 'none'

    # MAP settings
    # hex sizes
    HEX_WIDTH = 36
    HEX_HEIGHT = 40
    HEX_LEFT = 3
    HEX_TOP = 0
    HEX_TOP_MINUS = 32
    # map size
    MAP_WIDTH = 59
    MAP_HEIGHT = 49
    # -------------
    # pathfinding and moving
    PATH_MINOR_P = 5
    PATH_MAJOR_P = 25
    PATH_SUPER_P = 40
    AREA_PVP = {'x': [21, 37], 'y': [17, 30]}
    # -------------

    max_stored_messages = 20
    max_guild_stored_messages = 20
    max_notable_messages = 5

    # DEBUG VARS
    always_login = False

    # for elfrey mpr
    pp = pprint.PrettyPrinter(indent=4)

    def __init__(self):
        config_loader = CoreConfigParser()
        self.loaded_data = config_loader.readData()

        self.HOST = self.loaded_data['site_address']

        # Twitter API keys
        self.p_key = self.loaded_data['p_key']
        self.p_secret = self.loaded_data['p_secret']

        # Twitter parser API keys
        self.p_parser_key = self.loaded_data['p_parser_key']
        self.p_parser_secret = self.loaded_data['p_parser_secret']

        self.server_utc_offset = self.loaded_data['utc_global_offset']

        # DEBUG
        if 'always_login' in self.loaded_data:
            self.always_login = self.loaded_data['always_login']
            if self.always_login:
                self.login_name = self.loaded_data['login_name']

        self.debug = self.loaded_data['debug']
        self.conf_name = config_loader.conf_name
        self.base_fields = {'host': self.HOST}
        self.getBuildInfo()

    def getAuthHash(self, user_id):
        return md5.new(str(user_id) + self.md5_pass_phrase).hexdigest()

    def getBuildInfo(self):
        result = os.popen('git log | grep "^commit" | wc -l')
        lines = result.readlines()
        self.__build__ = int(lines[0])

    def getInviteHash(self):
        return md5.new(self.md5_invite_phrase).hexdigest()

    def getAvatarLink(self, guild):
        if not 'img' in guild or guild['img'] == 'none' or not guild['img']:
            return self.GUILD_DEF_AVATAR
        else:
            return self.GUILDS_AVATAR_FOLDER + guild['img']

    def mpr(self, what):
        self.pp.pprint(what)
        return what

    def relativePosition(self, coords):
        TOP_SHIFT = self.HEX_TOP
        if coords['x'] % 2 == 1:
            TOP_SHIFT = self.HEX_TOP_MINUS
        x = coords['x'] * self.HEX_WIDTH + self.HEX_LEFT
        y = coords['y'] * self.HEX_HEIGHT + TOP_SHIFT
        return {'x': x, 'y': y}

    def getMap(self):
        import db
        mongo = db.mongoAdapter()
        coords_array = mongo.getu('map', sort={'y': 1, 'x': 1})
        map = [[]]
        x = 0
        y = 0
        for item in coords_array:
            if x < self.MAP_WIDTH:
                pass
            else:
                map.append([])
                x = 0
                y += 1
            map[y].append([item['tile'], item['elevation']])
            x += 1
import datetime
from multiprocessing import Process
import time

# Needed in the update record to double check that the result was properly updated
# from pymongo import ReturnDocument

from client_profile import ProfileInfo
from client_link import LinkInfo
from client_tweet import TweetInfo
from api_twitter import TwitterApi as Twitter
from utils import epoche_to_datetime
from params import dataScheme

# Logging
from logger import logger

logger = logger(name='client_REST_api', stream_level='DEBUG', file_level='ERROR')


class EmptyListError(Exception):
    """ """
    def __init__(self):
        pass


class PauseError(Exception):
    """ """
    def __init__(self):
        pass


class RESTAPI(Process, dataScheme):
import os
import numpy as np
import flask
import tarfile, boto3
import json
from flask import Flask, request
from recsys.ap_recsys import ApRecsys
from recsys.train.aurora_client import AuroraConfig
from recsys.serve.dynamodb_client import DynamoDBClient
from logger import logger

lg = logger().logger


def download_artifacts(train_job_name, model_save_path):
    # s3 configure
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')
    bucket = s3.Bucket(BUCKET_NAME)

    # make the directory that the model will be saved to
    if (os.path.isdir(model_save_path) == False):
        os.mkdir(model_save_path)

    s3_Path = os.path.join(ARTIFACT_PATH, train_job_name.upper())
    out_Path = model_save_path + 'model.tar.gz'
    for file in bucket.objects.filter(Prefix=s3_Path):
        s3_client.download_file(file.bucket_name, file.key, out_Path)
        lg.debug("Download_file ... %s", file.key)
import updater
import threading
from main import app
import config

# Column headers (Russian): locality, street, outage time, reason
COLS = [u'Населенный пункт', u'Улица', u'Время отключения', u'Причина']

# Bot commands (Russian): subscribe, unsubscribe, help, show nearest, show subscribed
cmds = {
    u'Подписаться': 'notify',
    u'Отписаться': 'unnotify',
    u'Помощь': 'start',
    u'Показать ближайшее': 'show',
    u'Показать по подписке': 'showmy'
}

with open('/home/orangepi/electronotifybot/notify.pid', 'w+') as f:
    f.write('%s\n' % str(os.getpid()))

logger = logger.logger()
bot = telebot.TeleBot(config.token, threaded=False)
user_dict = {}

if config.heroku_debug:
    logger.debug('remote debug')
    sys.path.append('pycharm-debug.egg')
    import pydevd
    pydevd.settrace(config.server_debug, port=config.port_debug,
                    stdoutToServer=True, stderrToServer=True)
def box_office_scrapper():
    logger()
    scrape_runner()
    return {"process": "Done"}
window = int(properties["window"])
# os.environ["CORENLP_HOME"] = corenlphome

stringspace = stringsequencespace.StringSequenceSpace(dimensionality, denseness, window)
stringspace.importelementspace(charactervectorspacefilename)
stringspace.importpospermutations(postriplefile)

if __name__ == "__main__":
    if len(sys.argv) > 1:
        resourcedirectory = sys.argv[1] + "/" + language + "/text/"
    if len(sys.argv) > 2:
        outputdirectory = sys.argv[2] + "/" + language + "/"
    logger("input from: " + resourcedirectory, monitor)
    logger("output to: " + outputdirectory, monitor)
    if not os.path.exists(outputdirectory):
        os.makedirs(outputdirectory)

    # create new vectors for test set
    filenamelist = []
    try:
        for filename in os.listdir(resourcedirectory):
            hitlist = re.match(filenamepattern, filename)
            if hitlist:  # and random.random() > 0.5:
                filenamelist.append(os.path.join(resourcedirectory, filename))
    except:
        logger("No files found in " + resourcedirectory, error)
def __init__(self):
    """ """
    self.logger = logger()
#!/usr/bin/python
from subprocess import call
from logger import logger
import sys

log_file = sys.argv[1]
sys.path.insert(0, '../')
logger(log_file)

from counter import counter
from compare import compare

call('./run-qa.sh', shell=True)
print "=============================DBENCH TESTS BEGIN============================="
call('time /opt/qa/tools/system_light/run.sh -w /mnt/ganesha-mnt -l /export/dbench.log -t dbench > /export/dbench.log',
     shell=True)
print "Log file : /export/dbench.log"
ret = compare("Total 1 tests were successful", "/export/dbench.log")
if ret == 1:
    print "DBENCH TESTS : PASS"
    counter(1)
else:
    print "DBENCH TESTS : FAIL"
print "==============================DBENCH TESTS END=============================="
index = 0
space = hyperdimensionalsemanticspace.SemanticSpace(dimensionality, denseness)
if restorespace:
    space.inputwordspace("/home/jussi/data/storm/vectorspace/articlespace.hyp")
seq = SequenceLabels(dimensionality, ngramwindow)
if restorespace:
    seq.restore("/home/jussi/data/storm/vectorspace/sequencemodel.hyp")
sentencerepository = {}
vectorrepositoryidx = {}
vectorrepositoryseq = {}
vectorrepositorycxg = {}
vectorrepositorysem = {}
featurerepository = {}
# ===========================================================================
logger("starting with " + str(len(files)) + " files: " + str(files), monitor)


def tokenvector(tokenlist, initialvector=None, weights=True, loglevel=False):
    if initialvector is None:
        initialvector = sparsevectors.newemptyvector(dimensionality)
    for item in tokenlist:
        if not weights or str(item).startswith("JiK"):
            # cxg features should not be weighted the same way lex feats are
            weight = 1
        else:
            weight = space.languagemodel.frequencyweight(item, True)
        space.observe(item, True)
        tmp = initialvector
        initialvector = sparsevectors.sparseadd(
waitingTime = 1
if runningMode == 'LOCAL':
    # of course, not the case when running with nohup
    # because we will never have enough processes to saturate castor.
    waitingTime = 0
batchManager.SubmitJobs(waitingTime)

# logging
from logger import logger

oldPwd = os.getcwd()
os.chdir(batchManager.outputDir_)
logDir = 'Logger'
os.system('mkdir ' + logDir)
log = logger(logDir)

if doCVSTag == False:
    print 'cmsBatch2L2Q will NOT tag CVS'
log.tagPackage = doCVSTag
log.logCMSSW()
log.logJobs(nJobs)
# COLIN not so elegant... but tar is behaving in a strange way.
log.addFile(oldPwd + '/' + cfgFileName)

if not batchManager.options_.negate:
    if batchManager.remoteOutputDir_ != "":
        # we don't want to crush an existing log file on castor
        # COLIN could protect the logger against that.
        log.stageOut(batchManager.remoteOutputDir_)
            itemspace.category[itemj["authorindex"]] = itemj["category"]
            itemspace.name[itemj["authorindex"]] = itemj["authorname"]
            categories.add(itemj["category"])
            n += 1
            if n >= testbatchsize:
                break
    except EOFError:
        goingalong = False
    return n


categories = set()
itemspace = SemanticSpace()
importvectors(categorymodelfilename)
logger("Testing targetspace with " + str(len(categories)) + " categories, " +
       str(testbatchsize) + " test items. ", monitor)

iter = 0
resultaggregator = []
prunedresultaggregator = []
for iterations in range(numberofiterations):
    iter += 1
    logger("Iteration " + str(iter) + " of " + str(numberofiterations) + ".", monitor)
    items = list(itemspace.items())
    if testtrainfraction > 0:
        random.shuffle(items)
        split = int(len(items) * testtrainfraction)
        testvectors = items[:split]
        knownvectors = items[split:]
def processdependencies(ss, loglevel=False):
    string = []
    deps = []
    negation = False
    adverbial = []
    mainverb = False
    verbchain = []
    tense = False
    subject = False
    mode = False
    aspect = False
    type = False  # question, indicative, imperative, subjunctive ...
    logger("root: " + str(ss.basicDependencies.root), loglevel)
    i = 0
    for w in ss.token:
        string.append(w.lemma)
        logger(str(i) + "\t" + w.lemma + " " + w.pos, loglevel)
        i += 1
    for e in ss.basicDependencies.edge:
        dd = str(e.source) + " " + ss.token[e.source - 1].lemma + "-" + e.dep + "->" + \
             " " + str(e.target) + " " + ss.token[e.target - 1].lemma
        deps.append(dd)
        logger(dd, loglevel)
    sentenceitems = {}
    sentencepos = {}
    scratch = {}
    npweight = {}
    scratch["aux"] = []
    root = ss.basicDependencies.root[0]  # only one root for now -- fix this!
    i = 1
    for w in ss.token:
        sentenceitems[i] = w.lemma
        sentencepos[i] = w.pos
        scratch[i] = False
        i += 1
    tense = "PRESENT"
    if sentencepos[root] == "VBD":
        tense = "PAST"
    if sentencepos[root] == "VBN":
        tense = "PAST"
    for edge in ss.basicDependencies.edge:
        logger(str(edge.source) + " " + sentenceitems[edge.source] + " " + "-" + " " +
               edge.dep + " " + "->" + " " +
               str(edge.target) + " " + sentenceitems[edge.target], loglevel)
        if edge.dep == 'neg' and sentencepos[edge.source] in verbposes:
            negation = True
        elif edge.dep == 'advmod':
            if edge.source == root:
                target = "epsilon"
            else:
                target = edge.source
        elif edge.dep == 'nsubj':
            subject = edge.target
        elif edge.dep == 'amod' or edge.dep == "compound":
            if edge.target in npweight:
                npweight[edge.target] += 1
            else:
                npweight[edge.target] = 1
        elif edge.dep == 'auxpass':
            if sentenceitems[edge.target] == "be":
                scratch['aux'].append("be")
                mode = "PASSIVE"
        elif edge.dep == 'aux':
            if sentenceitems[edge.target] == "have":
                scratch['aux'].append("have")
            if sentenceitems[edge.target] == "do":
                scratch['aux'].append("do")
            if sentenceitems[edge.target] == "be":
                scratch['aux'].append("be")
                if sentencepos[edge.source] in verbposes:
                    tense = "PROGRESSIVE"
            if sentenceitems[edge.target] == "can":
                scratch['aux'].append("can")
            if sentenceitems[edge.target] == "could":
                scratch['aux'].append("could")
            if sentenceitems[edge.target] == "would":
                scratch['aux'].append("would")
            if sentenceitems[edge.target] == "should":
                scratch['aux'].append("should")
            if sentencepos[edge.target] == "VBD":
                tense = "PAST"
            if sentenceitems[edge.target] == "will":
                scratch['aux'].append("will")
            if sentenceitems[edge.target] == "shall":
                scratch['aux'].append("shall")
    try:
        if sentencepos[root] == "VB":
            if 'aux' in scratch:
                if "will" in scratch['aux'] or "shall" in scratch['aux']:
                    tense = "FUTURE"
    except KeyError:
        logger("tense situation in " + str(string), True)
    features = []
    if root > len(ss.token) / 2:
        features.append(tag + "VERYLATEMAINV")
    elif root > len(ss.token) / 3:
        features.append(tag + "LATEMAINV")
    else:
        features.append(tag + "EARLYMAINV")
    if 'aux' in scratch:
        for aa in scratch['aux']:
            features.append(tag + aa)
    if mode:
        features.append(tag + mode)
    if tense:
        features.append(tag + tense)
    if negation:
        features.append(tag + "NEGATION")
    for np in npweight:
        if npweight[np] > 2:
            features.append(tag + "HEAVYNP")
    if subject:
        if sentenceitems[subject] == "I":
            features.append(tag + "p1sgsubj")
        if sentenceitems[subject] == "we":
            features.append(tag + "p1plsubj")
        if sentenceitems[subject] == "you":
            features.append(tag + "p2subj")
    # logger(str(features) + "\t" + str(string) + "\t" + str(deps), True)
    return features
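# Worked example (assumed, not from the original code) of what the feature
# extraction above yields, given that `verbposes` includes "VBN" and `tag` is
# the feature prefix used elsewhere in the module. For "The dog was not chased"
# (root = "chased" at position 5 of 5), the auxpass edge to "was" (lemma "be")
# sets PASSIVE, the neg edge sets NEGATION, and the VBN root sets PAST:
#
#   processdependencies(ss)
#   # -> [tag + "VERYLATEMAINV", tag + "be", tag + "PASSIVE",
#   #     tag + "PAST", tag + "NEGATION"]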
def __init__(self):
    """
    Overloaded __init__ function, hiding the original __init__ on LOFARnodeTCP.
    """
    self.logger = logger()
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################

import asyncio
from typing import Optional

from gerrit import GerritStreamEvents, GerritStreamParser
from logger import logger
from git import Repository
from jiracloser import JiraCloser
from config import Config
from .args import Args

log = logger("bot")


class Bot:
    def __init__(self) -> None:
        self.loop = asyncio.get_event_loop()
        self.g = GerritStreamEvents()
        self.parser = GerritStreamParser()
        self.args = Args()
        log.info("Using '%s' configuration", self.args.config_section)
        config = Config(self.args.config_section)
        self.jira_closer = JiraCloser(config)

    async def update_project(self, name: str, since: Optional[str] = None) -> None:
def depparseprocess(string, ss, debug=False):
    negated = False
    target = "epsilon"
    adverbial = "epsilon"
    subject = "epsilon"
    verb = "epsilon"
    qu = "epsilon"
    scratch = {}
    question = {}
    logger("root: " + str(ss.basicDependencies.root), debug)
    i = 0
    for w in ss.token:
        logger(str(i) + " " + w.lemma + " " + w.pos, debug)
        i += 1
    for e in ss.basicDependencies.edge:
        logger(str(e.source) + ss.token[e.source - 1].lemma + "-" + e.dep + "->" +
               str(e.target) + ss.token[e.target - 1].lemma, debug)
    sentenceitems = {}
    sentenceitems["epsilon"] = None
    sentencepos = {}
    root = ss.basicDependencies.root[0]  # only one root for now -- fix this!
    qu = root
    target = root
    verb = root
    i = 1
    for w in ss.token:
        sentenceitems[i] = w.lemma
        sentencepos[i] = w.pos
        scratch[i] = False
        if w.pos == "WP":
            qu = i
        if w.pos == "WRB":
            qu = i
        i += 1
    tense = "PRESENT"
    if sentencepos[root] == "VBD":
        tense = "PAST"
    if sentencepos[root] == "VBN":
        tense = "PAST"
    for edge in ss.basicDependencies.edge:
        logger(str(edge.source) + " " + sentenceitems[edge.source] + " " + "-" + " " +
               edge.dep + " " + "->" + " " +
               str(edge.target) + " " + sentenceitems[edge.target], debug)
        if edge.dep == 'nsubj':
            logger("subject:" + str(edge.target) + sentenceitems[edge.target], True)
            subject = edge.target
        elif edge.dep == 'neg':
            negated = True
        elif edge.dep == 'advmod':
            if edge.target == qu:
                if edge.source == root:
                    target = "epsilon"
                else:
                    target = edge.source
            else:
                adverbial = edge.target
        elif edge.dep == 'cop':
            if edge.target == qu:
                target = edge.source
            else:
                adverbial = edge.target
        elif edge.dep == 'aux':
            if sentenceitems[edge.target] == "have":
                scratch['aux'] = "have"
            if sentenceitems[edge.target] == "do":
                scratch['aux'] = "do"
            if sentencepos[edge.target] == "VBD":
                tense = "PAST"
            if sentenceitems[edge.target] == "will":
                scratch['aux'] = "will"
            if sentenceitems[edge.target] == "shall":
                scratch['aux'] = "shall"
    if target == "epsilon":
        if subject != "epsilon":
            target = subject
    try:
        logger(sentenceitems[root] + " " + sentencepos[root], debug)
        if sentencepos[root] == "VB":
            if 'aux' in scratch:
                if scratch['aux'] == "will" or scratch['aux'] == "shall":
                    tense = "FUTURE"
    except KeyError:
        logger("tense situation in " + string, True)
    question["question"] = sentenceitems[qu]
    question["target"] = sentenceitems[target]
    question["verb"] = sentenceitems[verb]
    question["adverbial"] = sentenceitems[adverbial]
    question["subject"] = sentenceitems[subject]
    question["tense"] = tense
    question["negated"] = negated
    # logger(question["question"] + " " + question["target"] + " " +
    #        question["verb"] + " " + question["adverbial"] + " " +
    #        question["subject"] + " " + question["tense"] + " " +
    #        str(question["negated"]), debug)
    return question
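# Worked example (assumed, not from the original code): for "Where will she
# go?", "where" is a WRB token so qu points at it, advmod(go, where) with
# source == root clears target to "epsilon", aux(go, will) plus the VB root
# gives FUTURE, and the fallback at the end promotes the nsubj "she" to target:
#
#   depparseprocess("Where will she go?", ss)
#   # -> {'question': 'where', 'target': 'she', 'verb': 'go',
#   #     'adverbial': None, 'subject': 'she', 'tense': 'FUTURE',
#   #     'negated': False}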
def __init__(self, options):
    self.options = options
    self.queue = Queue()
    self.proc = Process(target=self.loop)
    self.conn = None
    self.logger = logger.logger(self)
from block import Block
from logger import logger
from bge import logic

DETECTION_POINTS = {
    'LEFT': [10.0, 0.0, 0.0],
    'RIGHT': [-10.0, 0.0, 0.0],
    'DOWN': [0.0, 10.0, 0.0],
    'UP': [0.0, -10.0, 0.0]
}

log = logger()


class SpaceBlock(Block):

    def __init__(self):
        scene = logic.getCurrentScene()
        super(SpaceBlock, self).__init__(scene, scene.objects['space_block'])

    def detectLogicalBlocks(self):
        detectedObjs = {}
        for direction, point in DETECTION_POINTS.items():
            block = self.obj.rayCastTo(point, 1.0, 'logical_block')
            if not block:
                continue
            detectedObjs[str(block['block_number'])] = direction
        return detectedObjs

    def setPosition(self, node):
        self.obj.position[0] = node.position[0]
        self.obj.position[1] = node.position[1]
        self.setNode(node)
def parse_xpaths(self, response):
    # Is this a 'projects' URL?
    regexp = re.compile(r'projects')
    url = response.url
    driver_set_up = web_driver_setup.web_driver_setup("log")
    driver = driver_set_up.driver
    driver.get(url)
    log = logger.logger()
    log.init("log")
    # This check may now be unnecessary
    if regexp.search(response.url):
        item = Project_Item()
        item['url'] = response.url
        print("This is the logger URL", response.url)

        # Check the status of the project
        stat_live = driver.find_elements_by_xpath('//div[@class="Campaign-state-live"]')
        if len(stat_live) > 0:
            # print("The status of project "+str(response.url)+" is live.")
            status = "open"
        else:
            # print("Live state not found.")
            status = "closed"

        # add_url has all the logic to adjust status in the log file
        log.add_url(response.url, status)

        if status == "closed":
            # print("Find the dates for a closed project")
            funding_period = driver.find_element_by_xpath('//*[@class="NS_campaigns__funding_period"]')
            beg_end = funding_period.find_elements_by_xpath('.//time')
            start_date = beg_end[0].get_attribute('datetime')
            end_date = beg_end[1].get_attribute('datetime')
            # print("This is the start date for a closed project", start_date)
            # print("This is the end date for a closed project", end_date)
            item['start_date'] = start_date
            item['end_date'] = end_date
        elif status == "open":
            # print("Find the dates for an open project")
            # start_date_element = driver.find_element_by_xpath('//*[contains(@class, "js-state_changed_at")]')
            # start_date_element = driver.find_element_by_xpath('//*[@class="js-state_changed_at"]')
            # start_date_element = start_date_element.find_element_by_xpath('//time')
            # print("This is the start date for an open project", start_date_element.get_attribute('datetime'))
            end_date_element = driver.find_element_by_xpath(
                '//*[contains(concat( " ", @class, " " ), concat( " ", "type-12", " " ))]//*[contains(concat( " ", @class, " " ), concat( " ", "js-adjust-time", " " ))]'
            )
            # print("This is the end date for an open project", end_date_element.get_attribute('datetime'))
            # start_date = start_date_element.get_attribute('datetime')
            start_date = driver.find_element_by_xpath('//*[@class="js-adjust-time"]').text
            end_date = end_date_element.get_attribute('datetime')
            item['start_date'] = start_date
            item['end_date'] = end_date

        try:
            # Use this one
            name = response.xpath('//html/head/title').re(r'(\n.*\n)')[0].strip()
            item['name'] = name
        except IndexError:
            print('Caught IndexError parsing name')
            # print('This url threw an exception on parse: ', response.url)

        try:
            # Use this one
            pledge_numbers = response.xpath('//*[@id="pledged"]').re(
                r'(?<=data-pledged=)"(.*[0-9])')[0]
            item['total_raised'] = pledge_numbers
        except IndexError:
            print('Caught IndexError parsing pledged money')
            # print('This url threw an exception on parse: ', response.url)
            item['total_raised'] = 'NOT FOUND'

        if item['total_raised'] == 'NOT FOUND':
            try:
                pledge_numbers = response.xpath(
                    '//*[contains(concat( " ", @class, " " ), concat( " ", "mb0", " " ))]'
                    '//*[contains(concat( " ", @class, " " ),'
                    ' concat( " ", "money", " " ))]').re(
                        r'[$\d\.\,\s]+[-0-9.,]+[-0-9.,a-zA-Z]*\b')
                item['total_raised'] = pledge_numbers
            except:
                print('Caught a super bad error parsing total funding raised')
                item['total_raised'] = 'NOT FOUND'
                pass

        if status == "open":
            try:
                # Use this one
                goal = response.xpath(
                    '//*[contains(concat( " ", @class, " " ), concat( " ", "money", " " ))]'
                ).re(r'[$\d\.\,\s]+[-0-9.,]+[-0-9.,a-zA-Z]*\b')[0].strip()
                # goal = response.xpath('//*[@id="content-wrap"]/section/div/div[3]/div/div/div[3]/div[1]/
                # span[3]/span[1]').re(r'\$[-0-9.,]+[-0-9.,a-zA-Z]*\b')[0]
                item['funding_target'] = goal
            except IndexError:
                print('Caught IndexError parsing goal')
                # print('This url threw an exception on parse: ', response.url)
                item['funding_target'] = 'NOT FOUND'
        elif status == "closed":
            try:
                goal = response.xpath(
                    '//*[contains(concat( " ", @class, " " ), concat( " ", "navy-500", " " ))]'
                    '//*[contains(concat( " ", @class, " " ), concat( " ", "money", " " ))]'
                ).re(r'[$\d\.\,\s]+[-0-9.,]+[-0-9.,a-zA-Z]*\b')[0].strip()
                item['funding_target'] = goal
            except:
                print('Caught a super bad error parsing funding goal')
                item['funding_target'] = 'NOT FOUND'
                pass

        if item['funding_target'] == 'NOT FOUND':
            try:
                goal = \
                    response.xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "lh3-lg", " " ))]'
                                   '//*[contains(concat( " ", @class, " " ), concat( " ", "money", " " ))]').re(
                                       r'[$\d\.\,\s]+[-0-9.,]+[-0-9.,a-zA-Z]*\b')[0].strip()
                item['funding_target'] = goal
            except:
                item['funding_target'] = 'NOT FOUND'
                pass

        try:
            # Use this one
            backers = response.xpath('//*[(@id = "backers_count")]').re(r'"[-0-9.,]*"')[0].strip()
            item['num_backers'] = backers
        except:
            print('Caught IndexError on number of backers')
            # print('This url threw an exception on parse: ', response.url)
            item['num_backers'] = 'NOT FOUND'
            pass

        # Another try-except that should work for closed projects
        # This xpath just happens to get both amount raised and backers
        if item['num_backers'] == 'NOT FOUND':
            try:
                backers_amt = driver.find_element_by_xpath('//div[@class="mb0"]')
                # Handy xpath magic for finding integers in a string
                temp_back = [int(s) for s in backers_amt.text.split() if s.isdigit()]
                backers = temp_back[0]
                item['num_backers'] = backers
            except:
                print('Caught a super bad error finding number of backers')
                item['num_backers'] = 'NOT FOUND'

        if status == "closed":
            # Get the category and location
            # loc_and_cat = driver.find_element_by_xpath('//*[contains(@class, "NS_projects__category_location")]')
            try:
                loc_and_cat = driver.find_element_by_xpath(
                    '//*[contains(concat( " ", @class, " " ), concat( " ", "NS_projects__category_location", " " ))]'
                )
                kids = loc_and_cat.find_elements_by_xpath('.//*')
                # print("This is the locations", loc_and_cat.text)
                lc_list = []
                for kid in kids:
                    if len(kid.text) > 3:
                        # print("These are the kids", kid.text)
                        lc_list.append(kid.text)
                location = lc_list[0]
                category = lc_list[1]
                item['location'] = location
                item['category'] = category
            except:
                # Closed failed projects seem to need same xpath now as open projects
                # This is ugly but ok...
                location = response.xpath(
                    '//*[(@class = "nowrap navy-700 flex items-center medium type-12")]'
                ).re(r'(\n.*\n)')[1].strip()  # [2].strip()
                # print("The location is ", location)
                item['location'] = location
                # Gets the category of the project
                category = response.xpath(
                    '//*[(@class = "nowrap navy-700 flex items-center medium mr3 type-12")]'
                ).re(r'(\n.*\n)')[1].strip()
                # print("The category is ", category)
                item['category'] = category
        elif status == "open":
            try:
                # This is ugly but ok...
                location = response.xpath(
                    '//*[(@class = "nowrap navy-700 flex items-center medium type-12")]'
                ).re(r'(\n.*\n)')[1].strip()  # [2].strip()
                # print("The location is ", location)
                item['location'] = location
                # Gets the category of the project
                category = response.xpath(
                    '//*[(@class = "nowrap navy-700 flex items-center medium mr3 type-12")]'
                ).re(r'(\n.*\n)')[1].strip()
                # print("The category is ", category)
                item['category'] = category
            except:
                print("Caught nasty exception parsing category and location")
                item['location'] = 'NOT FOUND'
                item['category'] = 'NOT FOUND'
                pass

        # Gets the end date of the project
        # end_date = driver.find_element_by_xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "type-12", " " ))]//*[contains(concat( " ", @class, " " ), concat( " ", "js-adjust-time", " " ))]')
        # item['end_date'] = end_date.text

        # This is all images -- even small ones.
        pics = driver.find_elements_by_xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "fit", " " ))]'
        )
        # pics = driver.find_elements_by_tag_name("img")
        # print("The number of images is ", len(pics))
        item['num_pics'] = len(pics)

        # Get the number of videos
        vids = driver.find_elements_by_tag_name("iframe")
        item['num_vids'] = len(vids)

        # Get numbers like number of updates, FAQs, etc
        # element = driver.find_element_by_xpath('//*[((@class = "NS_projects__content"))]')
        element = driver.find_element_by_xpath('//*[(contains(@class,"NS_projects__content"))]')

        # Updates
        sub_element = element.find_element_by_xpath('//a[(@data-content="updates")]')
        # print("This is the sub_element", sub_element.text.encode('utf8'))
        text = sub_element.text.encode('utf8')
        result = [int(s) for s in text.split() if s.isdigit()]
        if len(result) == 0:
            result = 0
        # print(" Result of regex search ", result)
        item['number_of_updates'] = result

        # FAQs
        sub_element = element.find_element_by_xpath('//a[(@data-content="faqs")]')
        # print("This is the sub_element", sub_element.text.encode('utf8'))
        text = sub_element.text.encode('utf8')
        result = [int(s) for s in text.split() if s.isdigit()]
        if len(result) == 0:
            result = 0
        # print(" Result of regex search ", result)
        item['number_of_faqs'] = result

        # Comments
        sub_element = element.find_element_by_xpath('//a[(@data-content="comments")]')
        # print("This is the sub_element", sub_element.text.encode('utf8'))
        text = sub_element.text.encode('utf8')
        result = [int(s) for s in text.split() if s.isdigit()]
        if len(result) == 0:
            result = 0
        print(" Result of regex search ", result)
        item['number_of_comments'] = result

        # Start of code to find all the different pledge/reward levels
        outer_level_pledge_panel = driver.find_element_by_xpath('//ol')
        pledge_amounts = outer_level_pledge_panel.find_elements_by_xpath('.//*[@class="pledge__amount"]')
        # if len(pledge_amounts) > 0:
        #     print("I found pledge amounts")
        # else:
        #     print("I didn't find pledge amounts")
        pledge_list = []
        for pledge_amount in pledge_amounts:
            try:
                money = pledge_amount.find_element_by_xpath('.//*[@class="money"]')
                pledge_list.append(money.text)
            except:
                print("Caught a 'NoSuchElementException' ")
        reward_levels = "{"
        for pledges in pledge_list:
            pledges = unicode(pledges)
            reward_levels = reward_levels + pledges + ";"
        reward_levels = reward_levels + "}"
        # Concatenated list of reward/pledge $ amounts
        item['reward_levels'] = reward_levels
        # Number of different pledge values
        item['number_of_reward_levels'] = len(pledge_list)

        # Gets all the description text
        description_text = driver.find_elements_by_xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "formatted-lists", " " ))]//p'
        )
        if len(description_text) == 0:
            # Catches the case where description text isn't separated into paragraph elements
            description_text = driver.find_elements_by_xpath(
                '//*[contains(concat( " ", @class, " " ), concat( " ", "formatted-lists", " " ))]'
            )
        description = "{"
        for descriptions in description_text:
            description = description + descriptions.text
        risk_text = driver.find_elements_by_xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "js-risks", " " ))]//p'
        )
        # Gets the description from the risks and challenges section
        for risks in risk_text:
            description = description + risks.text
        description = description + "}"
        item['description'] = description

        # Find and concatenate all the comments for a project
        driver.get((response.url) + "/comments")
        comments = driver.find_elements_by_xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "ml3", " " ))]//p'
        )
        comment_list = "{"
        for comment in comments:
            comment_list = comment_list + comment.text + ";"
        comment_list = comment_list + "}"
        item['comments'] = comment_list
        # item['number_of_comments'] = len(comment_list)

        # Find and concatenate all the faqs for a project
        driver.get((response.url) + "/faqs")
        # faqs = driver.find_elements_by_xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "ml3", " " ))]//p')
        # faqs = driver.find_elements_by_xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "js-expanded", " " ))]//*[(((count(preceding-sibling::*) + 1) = 1) and parent::*)]//*[(((count(preceding-sibling::*) + 1) = 1) and parent::*)]')
        faqs = driver.find_elements_by_xpath('//*[@id="project-faqs"]')
        # print("Just the faqs, mame", faqs)
        faq_list = "{"
        for faq in faqs:
            # print(faq.text)
            faq_list = faq_list + faq.text + ";"
        faq_list = faq_list + "}"
        item['faqs'] = faq_list
        # num_faqs = driver.find_element_by_xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "project-nav__link--faqs", " " ))]//*[contains(concat( " ", @class, " " ), concat( " ", "count", " " ))]')  # len(faq_list)
        # item['number_of_faqs'] = num_faqs.text

        # Find how many updates there are
        driver.get((response.url) + "/updates")
        updates = driver.find_elements_by_xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "js-adjust-time", " " ))]'
        )
        # update_len = len(updates)
        # start_date = updates[update_len-1].text
        # item['start_date'] = start_date
        # print("Start date is ", start_date)
        # item['number_of_updates'] = len(updates[1:update_len-2])
        # for update in updates:
        #     update_list = update_list+update.text+";"
        # update_list = update_list+"}"
        # print((response.url)+"/updates")
        # print("This is the list of updates.", update_list)
        # time.sleep(10)
        # item['faqs'] = faq_list
        # item['number_of_faqs'] = len(faq_list)

        # Gets the text for updates
        # Updates can be on multiple pages
        # But let's not go through update pages if we have no updates
        for i in range(1, 10):
            driver.get((response.url) + "/posts?page=" + str(i))
            # update_texts = driver.find_elements_by_xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "grid-post__content", " " ))]//p')
            update_texts = driver.find_elements_by_xpath(
                '//*[contains(concat( " ", @class, " " ), concat( " ", "formatted-lists", " " ))]//p'
            )
            update_text_list = '{'
            for update_text in update_texts:
                update_text_list = update_text_list + update_text.text + " "
            update_text_list = update_text_list + "}"
        item['updates'] = update_text_list

        # Item in this context is all the info about a single project
        driver.close()
        return item
#!/usr/bin/env python3
import sys

from websocketThread import WebSocketThread
from application import application
from logger import logger

sys.path.append('.')

if __name__ == '__main__':
    socket = WebSocketThread()
    try:
        socket.start()
        application.mainloop()
    except KeyboardInterrupt:
        # TODO: close ws server and loop correctly
        logger("[exit] Exiting program...")
def start_requests(self):
    # Get Chrome web driver from helper function
    driver_set_up = web_driver_setup.web_driver_setup("test")
    driver = driver_set_up.driver

    # Kickstarter API sort type
    # Uncomment this when not testing...
    # url_sort_types = ["newest", "end_date", "magic", "popularity", "most_backed", "most_funded"]
    url_sort_types = ["newest", "end_date", "magic", "popularity"]
    # url_sort_types = ["most_funded"]

    # Randomize sort type to try to find all new projects with repeated scrapes
    # (kept as a one-element list so the loop below iterates sort types, not characters)
    url_sort_types = [url_sort_types[random.randint(0, 3)]]

    project_urls = []
    # Loop acquires list of urls to crawl
    for url_sort_type in url_sort_types:
        # Magic sort type randomizes based on some seed value
        # For magic, loop over a few random seeds to try to find all projects
        if url_sort_type == "magic" or url_sort_type == "most_backed" or url_sort_type == "most_funded":
            seeds = [str(random.randint(0, 999))]
            # Uncomment this when not testing...
            # seeds = [str(random.randint(0, 999)), str(random.randint(0, 999)), str(random.randint(0, 9999)),
            #          str(random.randint(0, 99999)), str(random.randint(0, 9999999)), str(random.randint(0, 9999999)),
            #          str(random.randint(0, 999999))]
        else:
            seeds = [str(random.randint(0, 999))]
        for seed in seeds:
            base_url = ["https://www.kickstarter.com/discover/advanced?sort=",
                        url_sort_type, "&seed=", seed]
            # Max page index is 200, loop over all of them
            for page in range(1, 200):
                page_number = ["&page=", str(page)]
                full_url = ""
                url = ""
                full_url = base_url + page_number
                page_url = url.join(full_url)
                # Get the page after constructing url
                driver.get(page_url)
                # Gets the project urls
                elements = driver.find_elements_by_xpath('//div[@class="js-track-project-card"]')
                log = logger.logger()
                log.init("test")
                for element in elements:
                    link = element.find_element_by_tag_name("a")
                    url = link.get_attribute("href")
                    # print(url)
                    if url not in project_urls:
                        project_urls.append(url)
                        print("This is the URL that will be passed to the logger ", url)
                        print str(url.encode('utf8'))
                        log.add_url(str(url.encode('utf8')), "open")

    # percent_live_found = float(len(project_urls))/float(proj_count)*100.00
    print(len(project_urls), " project urls found.")
    # print("Test spider found ", percent_live_found, "% of live Kickstarter projects")
    log.write_out_log()
    # print(" I'm writing out a log file...")
    # time.sleep(5)
    # for url in project_urls:
    #     yield scrapy.Request(url, callback=self.parse_xpaths)
    # for url in project_urls:
    #     yield scrapy.Request(url, callback=self.parse)
    yield scrapy.Request("https://www.kickstarter.com/discover/advanced", callback=self.parse)
def __init__(self):
    self._logger = logger()
    self._config = Config.getInstance()
import os
import re
import time

import challonge
import pysmash
from logger import logger
from pprint import pprint
from constants import TAGS_TO_COALESCE
from player_web import update_web
from urllib.error import HTTPError

smash = None
id_tag_dict = {}
sanitized_tag_dict = {}
debug = False
LOG = logger(__name__)
skip_count = 0


def sanitize_tag(tag):
    tag = ''.join([i if ord(i) < 128 else ' ' for i in tag])
    # Parse out sponsor
    tag = tag.split('|')[-1].lstrip().rstrip()
    return re.sub("[^a-z A-Z 0-9 : /]", '', tag.lower()).rstrip().lstrip()


def analyze_smashgg_tournament(db, url, scene, dated, urls_per_player=False, display_name=None, testing=False):
    global smash
    global skip_count
    if smash == None:
        smash = pysmash.SmashGG()
# fill a new section in the cron file
cron.fillJobs(cron.getJobs(db.getDao()))

# ************************************ main ****************************************
# instantiate vars
instances = None
release = None
playbooks = None
is_croner = None
rule = None
is_croner = False
run_webserver = False

# instantiate Logger
log = logger.logger(log_file, log_lvl, log_format)
logger = log.create_logger()
logger.info('---- logger init ----')

# get input for the py script
argv = sys.argv[1:]
try:
    opts, args = getopt.getopt(
        argv, "hcsi:r:p:u:",
        ["instances=", "release=", "playbooks=", "rule="])
except getopt.GetoptError:
    print '%s -i <instances array> -r <release> -p <playbooks array> -u <rule> -s -c \n -c : launch croning process who\'ll read from db and insert entries into crontab\n -s : run webserver to serve restful calls' % __file__

for opt, arg in opts:
    if opt == "-h":
        print '%s -i <instances array> -r <release> -p <playbooks array> -u <rule> -s -c \n -c : launch croning process who\'ll read from db and insert entries into crontab\n -s : run webserver to serve restful calls' % __file__
        sys.exit()
cf_embed_dim = 100

train_ds = tf.data.Dataset.from_tensor_slices((aug_enc, label_vect)).shuffle(10000).batch(cf_batch_size, drop_remainder=True)
for sample_desc, sample_labs in train_ds.take(1):
    pass

#%% Make model
modeltxt = tx.Text(vocab_size=len(tokenizer.word_index) + 1 + 4,
                   embed_dim=cf_embed_dim,
                   batch_size=cf_batch_size,
                   latent_dim=cf_latent_dim,
                   learning_rate=cf_learning_rate)
modeltxt.setLR(cf_learning_rate)
modeltxt.printMSums()
modeltxt.printIO()
loss_mean = tf.keras.metrics.Mean()

#%% Setup logger info
run_name = 'txtruns/0207-0831'
root_dir_local = '/home/starstorms/Insight/shape/' + run_name
root_dir_remote = '/data/sn/all/runs/' + run_name
lg = logger.logger(root_dir=root_dir_remote if remote else root_dir_local)
# lg = logger.logger()
lg.setupCP(encoder=None, generator=modeltxt.text_model, opt=modeltxt.optimizer)
lg.restoreCP()  # '/home/starstorms/Insight/shape/runs/0204-2220/models/ckpt-71')

#%% Train model manually
def trainModel(num_epochs, display_interval=-1, save_interval=10):
    print('\nStarting training...')
    modeltxt.training = True
    for epoch in range(1, num_epochs):
        start_time = time.time()
        loss_mean.reset_states()
        for train_x, train_y in train_ds:
            desc_index = random.randint(0, cf_descs_per_shape - 1)
            # st = time.time()
            trn_x = train_x[:, desc_index, :]
# -*- coding:utf-8 -*-
import os
import re
import psycopg2
import time

from logger import logger
from zbxcfg import zbxcfg

log = logger().getlogger


class database(object):

    def __init__(self, type='pg'):
        self.host = 'localhost'
        try:
            self.config = zbxcfg().config()
            self.dbuser = self.config['user']
            self.password = self.config['password']
            if type == 'pg':
                self.dbname = self.config['dbname']
            elif type == 'pgb':
                self.dbname = self.config['pgbdbname']
            else:
                log.error("Type not supported")
            self.unix_socket_directory = self.config['unix_socket_directory']
            log.debug("config : {}".format(self.config))
        except Exception as e:
            log.error(e)
w1_logfile_name = "{}_w1_sensors_logged.csv".format(log_file_index)

# create log files
if (logging_enabled):
    # w1
    with open(csv_log_base_path + w1_logfile_name, 'w+') as f:
        writer = csv.writer(f)
        writer.writerow(sample_time_header + w1_header)

###########################
# "Main"
###########################

# w1 logger object
w1_logger = logger.logger("w1_logger",
                          logFileName="{}{}_{}.log".format(w1_log_base_path, log_file_index, w1_log_file_base))

# loop counter
loop_index = 0

# starting relative time stamp
t_start_rel = time.time()

# previous time
prev_epoch_time = t_start_rel

# If there are no w1 sensors, exit
if (num_w1_sensors == 0):
    w1_logger.log.warning("NO W1 SENSORS DETECTED!!!")
    sys.exit(1)