def main(): """ The main function, mostly because I'm a shit python dev and my files can get REALLY CLUTTERED. What this script does is parse through /g/, finds any wdg generals, and if it detects any post that has the format of keyword1: data1, keyword2: data2, etc, it will be parsed and added here the function below is mostly for showcasing it. """ wdgPosts = findwdg() p = MyHTMLParser() db.init() testme=True for i in wdgPosts.json()["posts"]: p.feed(i["com"]) parsedpost=parsepost(p.pop()) if(parsedpost!=None): if(i.get("tim") != None): parsedpost["image"] = "https://i.4cdn.org/g/"+str(i["tim"])+i["ext"] # parsedpost["image"] = pars if(testme): parsedpost["progress"]="; drop table projects; --" testme=False db.insertentry(parsedpost) p.close() import pprint p = pprint.PrettyPrinter() list(map(lambda i:p.pprint(i), db.getall()))
def create_app():
    """Flask application factory."""
    global app

    # Set up the Flask app and app.config
    app = Flask(__name__)
    app.config.from_envvar('APP_CONFIG')
    app.config['SECRET_KEY'] = os.urandom(32)

    mail = Mail(app)
    recipient = app.config['MAIL_RECIPIENT']
    email.init(mail, recipient)

    db.init(app.config['DB_HOST'], app.config['DB_PORT'], app.config['DB_USER'],
            app.config['DB_PASSWORD'], app.config['DB_DATABASE'])

    csrf.init_app(app)

    @app.errorhandler(CSRFError)
    # @csrf.error_handler
    def csrf_error(reason):
        # noinspection PyBroadException
        try:
            return jsonify(reason=reason)
        except:
            return str(reason)

    return app
def main():
    # Initialize db/fts
    db.init()
    db.fts.init()
    model.category.init()
    db.mock_data.maybe_bootstrap_db()

    app.run(host='0.0.0.0', port=80)
def do_import():
    musicbrainzngs.set_useragent("SBUIfy", "0.0.1", "*****@*****.**")
    logging.getLogger('musicbrainzngs').setLevel(logging.WARNING)

    db.init()

    # remove existing data
    print('deleting existing data...')
    # delete_images()  # TODO: only delete artist & album images; move to image module
    db.execute_script('truncate.sql')
    print('done.', flush=True)

    # import albums, songs, and artists for all labels
    db.execute("SELECT * FROM label")
    for label in db.get_cursor().fetchall():
        print('importing label {}...'.format(label['name']))
        import_label_releases(label['id'], label['mbid'])
        print('done.', flush=True)

    # wrap up
    print('finishing...')
    db.execute_script('set_hibernate_sequence.sql')
    print('done.')
    db.close()
def run():
    db.init()
    try:
        ros = get_ros()
        delay = DEFAULT_SECONDS_BETWEEN_READINGS
        while True:
            start = time.time()
            sys.stdout.write('\r\033[K')
            for pin_num, (sensor_type, ro) in enumerate(zip(SENSOR_TYPES, ros)):
                val = adc.read(pin_num)
                dt = datetime.now(timezone.utc).astimezone()
                logging.info('%s=%g ' % (sensor_type, val))
                temp, hum = openweather.get_temperature_and_rel_humidity()
                db.store_reading(dt, sensor_type, val, ro, temp, hum)
            delay = upload_recorded()
            try:
                delay = float(delay)
            except (TypeError, ValueError) as e:
                delay = DEFAULT_SECONDS_BETWEEN_READINGS
                logging.error('Cannot parse delay: %s' % e)
            logging.info('Wait %s seconds...' % delay)
            sleep = delay - time.time() + start
            if sleep > 0:
                time.sleep(sleep)
    except KeyboardInterrupt:
        db.close_connection()
def main():
    db.init()
    abs_path = _get_abs_path()

    q = multiprocessing.Queue()
    _preload_queue(q)

    upload_job = multiprocessing.Process(target=upload_worker.upload_worker, args=(q, ))
    upload_job.start()

    queue_job = multiprocessing.Process(target=_queue_worker, args=(abs_path, q))
    queue_job.daemon = True
    queue_job.start()

    try:
        upload_job.join()
        queue_job.join()
    except KeyboardInterrupt:
        upload_job.terminate()
        queue_job.terminate()
        upload_job.join()
        queue_job.join()
def RunSpy():
    user_ids = GetUserIds()  # from the file
    db.init()
    users_online = db.get_users()  # getting last user status from DB

    start = time.time()
    try:
        print(datetime.datetime.now().strftime('%H:%M:%S'), 'Getting response. ')
        response = get_users(user_ids, 'online', timeout=3)
    except ResponseError as e:
        print(e)
        return
    end = time.time()
    print(datetime.datetime.now().strftime('%H:%M:%S'),
          'Response came in {0:04.2f} sec'.format(end - start))

    now = datetime.datetime.now()
    now_fmttd = now.strftime('%Y-%m-%d %H:%M:%S')
    for user in response:
        if user['id'] not in users_online or user['online'] != users_online[user['id']]:
            users_online[user['id']] = user['online']
            # add event to database
            db.add_online_status(user['id'], user['online'], now_fmttd)

    now = datetime.datetime.now()
    sleep_value = 10
    print(now.strftime('%H:%M:%S'), 'Now waiting for {0} sec'.format(sleep_value))
    time.sleep(sleep_value)
def run():
    db.init()
    try:
        logging.info('\nPress Ctrl+C to stop')
        ros = get_ros()
        logging.info('\nRead sensors every %s seconds...' % REPEAT_DELAY_SECONDS)
        while True:
            start = time.time()
            sys.stdout.write('\r\033[K')
            for pin_num, (sensor_type, ro) in enumerate(zip(SENSOR_TYPES, ros)):
                val = adc.read(pin_num)
                dt = datetime.now(timezone.utc).astimezone()
                sys.stdout.write('%s=%g ' % (sensor_type, val))
                sys.stdout.flush()
                temp, hum = openweather.get_temperature_and_rel_humidity()
                db.store_measurement(dt, sensor_type, val, ro, temp, hum)
            upload_recorded()
            sleep = REPEAT_DELAY_SECONDS + start - time.time()
            if sleep > 0:
                time.sleep(sleep)
    except KeyboardInterrupt:
        logging.info('Stopped by user')
        db.close_connection()
def do_listTable(self):
    """List the contents of a table."""
    # We need the name of the table to be listed.

    # database name
    if not self.options.arg_database:
        self.options.arg_database = config.dbname
    database_filename = self.options.arg_database

    # table name
    try:
        tablename = self.args[0]
    except IndexError:
        # python emanual.py --list
        # No table given: return the list of tables instead.
        print("You can use the name of the database without the db_ prefix.")
        print(self._listDbTable())
        return

    # Title: the name of the table
    table_instance = self._dbInstance(tablename)
    db.init(database_filename)
    tableobj = db.ListDb()
    tableobj.printa(table_instance)
def run(host, port, config_filename):
    """
    Runs the server.

    @param host The host for the server
    @param port The port for the server
    """
    import config
    config.load_config_file(config_filename)

    global triangulation, detectionserver, fingerprint
    import localization
    import detectionserver
    import fingerprint
    import db
    import pageserver

    db.init()
    config.app.run(host=host, port=int(port), debug=True)
def main():
    db.init()
    while db.get_settings()["mode"] == "init":
        Inithandler()
    if conf.DEBUG:
        print("Engine set to running mode!", flush=True)
    init_modules()
    Handler()
    wait_modules()
def main(args):
    if '--debug' in args:
        config.debug = True
    if config.debug:
        logging.getLogger('').setLevel(logging.DEBUG)
    db.init(config.debug)
    get_transactions(True)
def init():
    db.init()
    yield server.init()
    yield channel.init()
    yield filter_.init()
    var.init_vars()
    var.init_ivars()
    yield list_.init_lists()
    yield speed_variant.init()
def main(args):
    if '--debug' in args:
        logging.getLogger('').setLevel(logging.DEBUG)
    db.init()
    with db.tx() as session:
        session.query(Transaction)\
            .update({'member_id': None, 'type': None})
def main(db_file, begin, end):
    """
    :param db_file: File to use as a database
    :param begin: Number to start prime checks on
    :param end: Number to end prime checks on
    :return: list of primes found
    """
    db.init(db_file)
    return primes.isprimectl(db_file, begin, end)
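# A minimal sketch of how the entry point above might be invoked from the command
# line; the argument order mirrors main(db_file, begin, end), but the argv handling
# here is an illustrative assumption, not part of the original script.
if __name__ == '__main__':
    import sys
    found = main(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
    print(found)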
def xloader_status(context, data_dict):
    ''' Get the status of a ckanext-xloader job for a certain resource.

    :param resource_id: The resource id of the resource that you want the
        status for.
    :type resource_id: string
    '''
    p.toolkit.check_access('xloader_status', context, data_dict)

    if 'id' in data_dict:
        data_dict['resource_id'] = data_dict['id']
    res_id = _get_or_bust(data_dict, 'resource_id')

    task = p.toolkit.get_action('task_status_show')(context, {
        'entity_id': res_id,
        'task_type': 'xloader',
        'key': 'xloader'
    })

    datapusher_url = config.get('ckan.datapusher.url')
    if not datapusher_url:
        raise p.toolkit.ValidationError(
            {'configuration': ['ckan.datapusher.url not in config file']})

    value = json.loads(task['value'])
    job_id = value.get('job_id')
    url = None
    job_detail = None

    if job_id:
        # get logs from the xloader db
        db.init(config)
        job_detail = db.get_job(job_id)
        # timestamp is a date, so not sure why this code was there
        # for log in job_detail['logs']:
        #     if 'timestamp' in log:
        #         date = time.strptime(
        #             log['timestamp'], "%Y-%m-%dT%H:%M:%S.%f")
        #         date = datetime.datetime.utcfromtimestamp(
        #             time.mktime(date))
        #         log['timestamp'] = date

    try:
        error = json.loads(task['error'])
    except ValueError:
        # this happens occasionally, such as when the job times out
        error = task['error']

    return {
        'status': task['state'],
        'job_id': job_id,
        'job_url': url,
        'last_updated': task['last_updated'],
        'task_info': job_detail,
        'error': error,
    }
def download_songs():
    """Download high and low quality mp3 previews for all songs in the database."""
    db.init()
    db.execute("""
        SELECT DISTINCT CONCAT(ar.name, " - ", s.name) AS name,
               s.id AS id, s.length as length
        FROM song s, artist ar, album a
        WHERE s.album_id = a.id AND a.artist_id = ar.id
    """)

    dl_opts = {
        'format': 'best,worst',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }, {
            'key': 'ExecAfterDownload',
            'exec_cmd': 'ffmpeg -y -i {} -ss 0 -to ' + str(P_LENGTH) + ' -map 0:0 temp.mp3'
        }, {
            'key': 'ExecAfterDownload',
            'exec_cmd': 'mv temp.mp3 {}'
        }],
        'default_search': 'ytsearch1:',
        'ignoreerrors': True
    }

    i_query = ("INSERT INTO `song_files` (`song_id`, `quality`, `path`)"
               " VALUES (%s, %s, %s)")

    for song in db.get_cursor().fetchall():
        for fmt in ['best', 'worst']:
            sid = str(uuid.uuid4())
            dl_opts['format'] = fmt
            dl_opts['outtmpl'] = OUT_DIR + sid + '.%(ext)s'
            dl_opts['match_filter'] = video_filter(song['length'])
            with youtube_dl.YoutubeDL(dl_opts) as ydl:
                ret = ydl.download([song['name']])
            if ret == 0:
                data = (song['id'], fmt.upper(), SONGS_DIR + sid + '.mp3')
                db.execute(i_query, data)
            else:
                print('failed to download {}: skipping.'.format(song['name']))

    db.close()
def main():
    if 'DB_INIT' in os.environ:
        if os.environ['DB_INIT'] == 'TRUE':
            init(recreate=True)
            logger.info("db initialized. ")
            sys.exit(0)
        else:
            init(recreate=False)
            logger.info("db started. ")
    start_poller()
def before_request():
    """Initializes the database.

    Parameters: None
    Return: None
    """
    db.init()
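# before_request() above follows the request-hook naming convention used by Flask;
# a minimal sketch of how such a hook is typically registered. The Flask framework
# itself is an assumption here, not something the snippet states.
from flask import Flask

app = Flask(__name__)

@app.before_request
def _init_db_per_request():
    db.init()  # open/refresh the connection before handling each request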
def main(): log.msg("RUN main") # initialize app init() # initialize db log.msg("RUN db.init(app)") db.init(app) # this is probably pointless until there's a GUI of sorts app.jinja_env.auto_reload = True log.msg("RUN container.run") container.run(app=app, address="tcp:8080", debug=True)
def app(env, start_response):
    method = env["REQUEST_METHOD"]
    path = env["PATH_INFO"]
    db.init()

    # "Static" pages.
    if method == "GET" and path == "/":
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [msg.welcome % {"version": __version__}]
    elif path == "/.status":
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [
            msg.status % {
                "version": __version__,
                "count": db.allocated_slots(),
                "cache": db.cache_len(),
                "last_id": b62encode(db.last_paste_id()),
            }
        ]

    # Get expiration if any
    path, exp = extract_expiration(path)

    # Wrong syntax
    try:
        id = b62decode(path[1:])
    except ValueError:
        start_response("400 Bad Request", [("Content-Type", "text/plain")])
        return ["Invalid VimPaste Id Syntax"]

    # Create a new paste
    if method == "POST":
        data = env["wsgi.input"].read(int(env["CONTENT_LENGTH"]))[:MAX_LEN]
        try:
            new_id = db.save_paste(id, base64.b64encode(data), exp)
        except db.TooManySaves:
            start_response("400 Bad Request", [("Content-Type", "text/plain")])
            print("Too many saves! Flood?")
            return ["Too many saves!"]
        print("New Paste: %d" % new_id)
        start_response("200 OK", [("Content-Type", "text/plain")])
        return ["vp:%s" % b62encode(new_id)]

    # Document not found
    doc = db.get_paste(id)
    if not doc or doc["new"]:
        start_response("404 Not Found", [("Content-Type", "text/plain")])
        return ["VimPaste Not Found"]

    start_response("200 OK", [("Content-Type", "text/plain")])
    return [base64.b64decode(doc["raw"])]
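# A minimal sketch of serving the WSGI callable above for local testing with the
# standard library; wsgiref and the host/port choice are illustrative assumptions
# (the handler returns text bodies, which matches the Python 2 era WSGI servers it
# was written against).
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    make_server("127.0.0.1", 8000, app).serve_forever()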
def first_run():
    path_config = get_conf_path()
    path_db = get_db_path()

    if not os.path.isdir(ad.user_data_dir):
        os.mkdir(ad.user_data_dir)
    if not os.path.isfile(path_config):
        logging.debug("Writing config at %s" % path_config)
        write_blank(path_config)
    if not os.path.isfile(path_db):
        logging.debug("Writing empty db at: %s" % path_db)
        # Use a distinct name so the db module itself is not shadowed.
        manager = db.DbManager(path_db)
        manager.init()
def main(method):
    methods = ['timeline', 'init']
    method = method.strip('--')
    if method not in methods:
        print('ERROR: Invalid method. Please include a valid method.')
        sys.exit(1)
    if method == 'init':
        init()
    elif method == 'timeline':
        run_timeline(AUTH)
def run_indefinitely():
    log("Starting up..")

    # provoke errors early
    db.init()
    rfweb.init()
    upload.init()

    while True:
        delay = run_one_cycle() + datetime.timedelta(
            seconds=random.normalvariate(mu=0.0, sigma=15.0))
        log("Sleeping for {delay}", **locals())
        time.sleep(delay.total_seconds())
def init(): """Initialise and configure the app, database, scheduler, etc. This should be called once at application startup or at tests startup (and not e.g. called once for each test case). """ global _users, _names _configure_app(app) _users, _names = _init_login_manager(app) _configure_logger() init_scheduler(app.config.get('SQLALCHEMY_DATABASE_URI')) db.init(app.config.get('SQLALCHEMY_DATABASE_URI'))
def start():
    db.init()
    row = db.load_all_user()
    if row:
        for item in row:
            command = "sh startss.sh " + str(item[2]) + " " + item[1] + " " + item[3]
            os.system(command)

    import signal
    signal.signal(signal.SIGTERM, close)

    from BaseHTTPServer import HTTPServer
    server = HTTPServer(('', 8080), GetHandler)
    print('Starting server, use <Ctrl-C> to stop')
    server.serve_forever()
def init():
    log.warning(f'running from {os.getcwd()}')
    if not os.path.isfile(db.db_file):
        log.warning("No database file found at {}".format(db.db_file))
        h.printcolor("No database file found at {}".format(db.db_file),
                     color=h.bcolors.WARNING)
        db.create_database()
    else:
        log.warning("database file OK")
        h.printcolor("database file OK", color=h.bcolors.OKGREEN)
        db.connect_database()
    db.init()
def fetch_sensor_db_data(sensor_id, start, end):
    assert end > start
    db.init()
    static_data = db.query_dataframe("""
        SELECT time, min, max, average, variance
        FROM monitoring.sensor_data
        WHERE sensor_id = %s AND time >= %s AND time <= %s
        ORDER BY time ASC
        """, (sensor_id, start, end))
    return static_data
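# A minimal usage sketch for the helper above, assuming query_dataframe returns a
# pandas DataFrame; the sensor id and the datetime bounds are illustrative
# assumptions, not values taken from the original code.
from datetime import datetime, timedelta

if __name__ == '__main__':
    end = datetime.utcnow()
    start = end - timedelta(hours=24)
    df = fetch_sensor_db_data(sensor_id=1, start=start, end=end)
    print(df.describe())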
def main(token, storage):
    updater = Updater(token=token)
    dispatcher = updater.dispatcher
    log.log(log.INFO, 'Bot controller started')

    # load database and other global init
    db.init(storage)

    dispatcher.add_handler(CommandHandler('start', start))
    dispatcher.add_handler(CommandHandler('bet', bet, pass_args=True))
    updater.dispatcher.add_handler(CallbackQueryHandler(button))

    log.log(log.INFO, 'Starting to poll ...')
    updater.start_polling()
    updater.idle()
def execute(cols=None, where=None, limit=100, offset=0, order_by=[]):
    """select header"""
    dataset = db.init(docs=cached_db(state))
    query = dataset.frm("docs")

    if cols:
        query = query.select(cols)
        # If the query contains aggregate expressions, automatically add
        # the group_by columns.
        group_by = query.operations
        if isinstance(group_by, GroupByOp):
            exprs = group_by.relation.exprs
            names = []
            for e in exprs:
                if isinstance(e, Var):
                    names.append(e.path)
                elif isinstance(e, Function):
                    if not is_aggregate(e, dataset):
                        names.append(e.name)
            query = query.group_by(",".join(names))

    if where:
        query = query.where(where)

    if order_by:
        query = query.order_by(order_by)

    query = query.limit(limit).offset(offset)

    return json.dumps(dict(schema=[f.name for f in query.schema.fields],
                           records=list(query)))
def init():
    global client_status
    try:
        db.init()
        print("Database loaded!")
    except Exception:
        print("Error loading database. Please, reinstall client.")
        client_status = 1

    try:
        threading.Thread(target=receving, args=(s1, )).start()
        threading.Thread(target=receving, args=(s2, )).start()
        init_connection(s2)
    except Exception as e:
        print("Error.")
        print(e)
def generate_table(devices, t_len=10):
    con = db.init("TSS.db")
    cursor = con.cursor()
    function_names = [
        "getRawGyroscopeRate",
        "getRawAccelerometerData",
        "getRawCompassData",
    ]
    try:
        # run the recording for t_len seconds
        t_end = time.time() + t_len
        while time.time() < t_end or t_len == -1:  # -1 is a debug tool for endless recording
            # this is the mode with all 3 other functions
            row = []
            for device in devices:
                for func_name in function_names:
                    for data in getattr(device, func_name)():
                        row.append(data)
            time.sleep(.5)

            # deal with the data collected
            importlib.reload(db_parser)
            try:
                query = db_parser.parse_to_query(row)
            except Exception as e:
                print(e)
            else:
                cursor.execute(query)
    except KeyboardInterrupt:
        print("stopped")
    con.commit()
def run(host, port, config_file):
    import config
    config.load_config_file(config_file)

    global triangulation, detectionserver, fingerprint
    import localize
    import remserver
    import fingerprint
    import db
    import detectionserver
    import pageserver

    db.init()
    config.app.run(host=host, port=int(port), debug=True)
def __init__(self, port=None, db_name='data'):
    """Run my node."""
    db.init(db_name)
    if port:
        self.port = port
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.socket.bind((self.host, self.port))
    for node in db.Node.select():
        print('exist', node, node.addr, node.port)
        self.call(node.addr, 'hello')
    # self.listen()
    self.listen_xmpp()
def __init__(self, function_yaml):
    """Constructor.

    @param function_yaml YAML object for function map.
    """
    self._function_yaml = function_yaml
    self.conn = db.init("sqlite3", dbfile="/home/kabe/Archives/prof.db")
def run(self):
    # hack IOStream to raise the upload size limit
    def hack_iostream(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            method(self, *args, **kwargs)
            self.max_buffer_size = 200 * 1024 * 1024  # 200 MB
        return wrapper

    tornado.iostream.IOStream.__init__ = hack_iostream(tornado.iostream.IOStream.__init__)

    application = tornado.web.Application(router.get_routes(), **self.SETTINGS)
    application.listen(8888)
    comm.setup()
    db.init()
    tornado.ioloop.IOLoop.instance().start()
def main(): """Entry point for stand-alone execution.""" conf.init(), db.init(conf.DbPath) inqueue = LineQueue(sys.stdin).queue outqueue = type("", (), {"put": lambda self, x: print("\r%s" % x, end=" ")})() if "--quiet" in sys.argv: outqueue = None if conf.MouseEnabled: inqueue.put("mouse_start") if conf.KeyboardEnabled: inqueue.put("keyboard_start") start(inqueue, outqueue)
def create_app(config=None):
    """
    Create and initialise the application.
    """
    app = Flask(__name__)
    app.config.from_pyfile('%s/config/default.py' % app.root_path)

    if config:
        app.config.from_pyfile(config)
    elif os.getenv('FLASK_CONFIG'):
        app.config.from_envvar('FLASK_CONFIG')

    db.init(app)
    jinja.init(app)

    app.register_blueprint(views.main)

    return app
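# A minimal sketch of how the application factory above is typically used; the
# override path, host, and port are illustrative assumptions, not project values.
if __name__ == '__main__':
    app = create_app()  # or create_app('/path/to/override.py')
    app.run(host='127.0.0.1', port=5000, debug=True)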
def query(statement):
    dataset = db.init(docs=cached_db(state))
    query = dataset.query(statement)
    if not isinstance(query.operations, SliceOp):
        query = dataset.frm(query).limit(100)
    result = query.execute()
    return json.dumps(dict(schema=[f.name for f in result.schema.fields],
                           records=list(result)))
def __init__(self):
    db.init()
    handlers = [
        (r"/", MainHandler),
        (r"/login", login.LoginHandler),
        (r"/logout", login.LogoutHandler),
        (r"/invite", login.InviteHandler),
        (r"/verify/([^/]+)", login.VerifyHandler),
        (r"/reset", login.ResetPasswordHandler),
        (r"/reset/([^/]+)", login.ResetPasswordLinkHandler),
        (r"/addgame", AddGameHandler),
        (r"/leaderboard(/[^/]*)?", LeaderboardHandler),
        (r"/leaderdata(/[^/]*)?", LeaderDataHandler),
        (r"/history(/[0-9]+)?", HistoryHandler),
        (r"/playerhistory/(.*?)(/[0-9]+)?", PlayerHistory),
        (r"/playerstats/(.*)", PlayerStats),
        (r"/seating", seating.SeatingHandler),
        (r"/seating/regentables", seating.RegenTables),
        (r"/seating/clearcurrentplayers", seating.ClearCurrentPlayers),
        (r"/seating/addcurrentplayer", seating.AddCurrentPlayer),
        (r"/seating/removeplayer", seating.RemovePlayer),
        (r"/seating/prioritizeplayer", seating.PrioritizePlayer),
        (r"/seating/currentplayers.json", seating.CurrentPlayers),
        (r"/seating/currenttables.json", seating.CurrentTables),
        (r"/seating/players.json", seating.PlayersList),
        (r"/pointcalculator", PointCalculator),
        (r"/admin", admin.AdminPanelHandler),
        (r"/admin/users", admin.ManageUsersHandler),
        (r"/admin/delete/([0-9]*)", admin.DeleteGameHandler),
        (r"/admin/edit/([0-9]*)", admin.EditGameHandler),
        (r"/admin/promote/([0-9]*)", admin.PromoteUserHandler),
        (r"/admin/demote/([0-9]*)", admin.DemoteUserHandler),
    ]
    settings = dict(
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        debug=True,
        cookie_secret=cookie_secret,
        login_url="/login",
    )
    tornado.web.Application.__init__(self, handlers, **settings)
def start(
    id=default_id,
    remote=None,
    port=default_port,
    use_lock=False,
    use_purple=False,
    use_idle=False,
    dummy=False,
    central=False,
):
    db.init()
    report = Report(args.remote) if remote else None
    temperature.run(id, report, dummy=dummy)
    if use_lock:
        lock.run(id)
    if use_purple:
        purple.run(id)
    if use_idle and not dummy:
        idle.run(id, report)
    status.run(id)
    if central:
        bottle.run(server=bottle.PasteServer, host="0.0.0.0", port=port)
    else:
        wait_for_interrupt()
def main(self): """Main function. @param self @todo library などの指定を可能にする @todo SQLite3 のとき DB ファイルの指定を可能にする @todo -t でテストにする @todo class Parp など """ self.parse_opt() # Data Prepare logdir = self.args[0] funcmapfile = self.args[1] self.load_profs(logdir, funcmapfile) ## Unique nodes list self.nodeset = util.node_set(self.profs) # Prepare information to add self.prepare_registration() # DB prepare #self.conn = db.init("postgres", username="******", hostname="127.0.0.1") self.conn = db.init("sqlite3", dbfile="/home/kabe/Archives/prof.db") ### BEGIN TRANSACTION ### self.conn.begin_transaction() # Register try: # Profgroup group_id = self.add_profgroup() # ProfExec Insert profexec_id = self.add_profexec(group_id) util.out(group_id, profexec_id) # Profile Insert self.insert_profile(profexec_id) except Exception, e: util.err("Exception in main", repr(e)) self.conn.rollback_transaction() raise # Re-raise the exception
if group: sql += " GROUP BY " + group if order: get_direction = lambda c: (c if isinstance(c, basestring) else "DESC" if c else "ASC") sql += " ORDER BY " for i, col in enumerate(order): name = col[0] if isinstance(col, (list, tuple)) else col direction = "" if name == col else " " + get_direction(col[1]) sql += (", " if i else "") + name + direction if limit: sql += " LIMIT %s" % (", ".join(map(str, limit))) return sql, args if "__main__" == __name__: import db db.init(":memory:", "CREATE TABLE test (id INTEGER PRIMARY KEY, val TEXT)") print("Inserted ID %s." % db.insert("test", val=None)) for i in range(5): print("Inserted ID %s." % db.insert("test", {"val": i})) print("Fetch ID 1: %s." % db.fetch("test", id=1)) print("Fetch all up to 3, order by val: %s." % db.fetchall("test", order="val", limit=3)) print("Updated %s row where val is NULL." % db.update("test", {"val": "new"}, val=None)) print("Select where val IN [0, 1, 2]: %s." % db.fetchall("test", val=("IN", range(3)))) print("Delete %s row where val=0." % db.delete("test", val=0)) print("Fetch all, order by val: %s." % db.fetchall("test", order="val")) db.execute("DROP TABLE test") db.close()
def schema(relation_name):
    dataset = db.init(docs=cached_db(state))
    return dataset.get_schema(relation_name).to_dict()
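# The execute()/query()/schema() helpers above all build on the same
# db.init(docs=...) dataset object. A minimal usage sketch; the relation name
# "docs" comes from those snippets, while the selected column and printed output
# are illustrative assumptions about this dataset DSL.
if __name__ == '__main__':
    print(schema("docs"))                    # field names and types for the "docs" relation
    print(execute(cols="title", limit=5))    # first few rows of one column, as JSON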
import sys
sys.path.append(app_path + u'lib')

import pyrtm
import db
import options
import e32

# App configuration
apiKey = 'e8f0dd71d905f815af42b4752de411b5'
secret = 'd64c0294efd97ddd'
#token='c620b829b667a3e8c828dd28bb7cc132017029fc'

if not db.init(app_path + u"rtm.db"):
    options.init()
    #TODO more db initialization

rtm = pyrtm.RTM(apiKey, secret)
rtm.token = options.get_option('token')
if not rtm.token:
    frob = options.get_option('frob')
    if frob:
        rtm.frob = frob
        try:
            rtm.getToken()
            options.set_option('token', rtm.token)
        except RTMAPIError:
import pyen
import simplejson as json
import db

en = pyen.Pyen()

start = 0
total = 5000
batch_size = 1000
page_size = 100
max_hotttnesss = 1

names = set()
db.init(quick=False)

while len(names) < total:
    print len(names), "found, hotttnesss is", max_hotttnesss
    for start in xrange(0, batch_size, page_size):
        response = en.get(
            "artist/search",
            start=start,
            sort="hotttnesss-desc",
            results=page_size,
            max_hotttnesss=max_hotttnesss,
            bucket="hotttnesss",
        )
        for a in response["artists"]:
            name = a["name"]
            results = db.artist_search(name)
def mode_init(args):
    db.init()
#!/usr/bin/python
# -------------------------------------------------------------------------------------------------------------
# Copyright 2013 Oleksiy Voronin
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License
# -------------------------------------------------------------------------------------------------------------
import datetime

import iotd
import db

TIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'

if __name__ == '__main__':
    db.init('data/qotd-img.sqlite')
    dt = datetime.datetime.now().strftime(TIME_FORMAT)
    print('D: [{}] Grabbing NASA Image Of The Day'.format(dt))
    if iotd.retrieve_and_store(db):
        print('D: [{}] Retrieved new NASA Image Of The Day'.format(dt))
    else:
        print('D: [{}] Latest NASA Image Of The Day is already retrieved'.format(dt))
from flask import Flask
from flask.ext.script import Manager

import db

app = Flask(__name__)
acdb = db.init(app)

from cmd.airnow import (
    ForecastAreas,
    MonitoringSites,
    Hourly,
    ReportingAreas,
    LoadAreas,
    LoadSites,
    LoadHourly,
    GribDownload,
    GribProcess,
    Grib,
)
from cmd.data import ParseData, ParseForecast
from cmd.utah.data import Current, Forecast

# configure your app
manager = Manager(app)

manager.add_command("parse_data", ParseData())
manager.add_command("parse_forecast", ParseForecast())
manager.add_command("airnow_forecast_areas", ForecastAreas())
manager.add_command("airnow_monitoring_sites", MonitoringSites())
    kw = dict()
    if success:
        kw['status'] = _DONE
        kw['task_result'] = task_result
    else:
        retried = task.retried + 1
        kw['retried'] = retried
        kw['status'] = _ERROR if task.retried >= task.max_retry else _PENDING
    db.update_kw('tasks', 'id=?', task_id, **kw)


def set_task_timeout(task_id):
    pass


def delete_task(task_id):
    db.update('delete from tasks where id=?', task_id)


def notify_task(task):
    pass


if __name__ == '__main__':
    sys.path.append('.')
    dbpath = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
                          'doc_test.sqlite3.db')
    _log(dbpath)
    if os.path.isfile(dbpath):
        os.remove(dbpath)
    db.init('sqlite3', dbpath, '')
    db.update('create table tasks (id text not null, queue text not null, name text not null, callback text not null, timeout integer not null, status text not null, max_retry integer not null, retried integer not null, creation_time real not null, execution_time real not null, execution_start_time real not null, execution_end_time real not null, execution_expired_time real not null, version integer not null, task_data text not null, task_result text not null);')
    import doctest
    doctest.testmod()
    os.remove(dbpath)
        no_path += 1

    print
    print 'paths:', paths
    print 'no paths:', no_path
    if paths > 0:
        avg_time = sum_time / paths
        avg_length = sum_length / paths
        print 'avg length:', avg_length
        print 'avg time:', avg_time
        print 'max length:', max_length
        print 'max time:', max_time


if __name__ == '__main__':
    quick = len(sys.argv) > 1 and sys.argv[1] == '--quick'
    cli = CLI()
    db.init(quick=quick)
    while True:
        try:
            cli.cmdloop()
            break
        except:
            print "Error:", sys.exc_info()[0]
            print "Type:", sys.exc_info()[1]
            print 'Traceback:'
            traceback.print_tb(sys.exc_info()[2])
db.update_game_player(game_id, player_id, "red", "True") else: if i % 2 == 0: db.update_game_player(game_id, player_id, "blue", "False") else: db.update_game_player(game_id, player_id, "red", "False") ######################################################################### ### General Database checking functions for everywhere ### def player_in_game(response, roomcode): player_id = get_player_id_cookies(response) in_game = db.player_in_game(player_id, roomcode) return in_game server = Server() #server.register("/", index, post=add_name_page) server.register("/", index_page) server.register("/register", register_user_post) server.register("/login", login_post) server.register("/join_game", join_game_page) server.register("/game/create", create_game_post) server.register("/game/join", join_game_post) server.register("/lobby/([a-z]+)", lobby_page) server.register("/game/startgame/([a-z]+)", start_game_post) server.register("/game/([a-z]=)", game_page) db.init() server.run()
def main(): """ Downloads and analyzes a bunch of random Wikipedia articles using online VB for LDA. """ # The number of documents to analyze each iteration batchsize = 64 # The total number of documents in Wikipedia D = 3.3e6 # The number of topics K = 100 # How many documents to look at if (len(sys.argv) < 2): documentstoanalyze = int(D/batchsize) else: documentstoanalyze = int(sys.argv[1]) # Our vocabulary vocab = file('./dictnostops.txt').readlines() W = len(vocab) # Add terms and topics to the DB db.init() db.add_terms(vocab) db.add_topics(K) # Initialize the algorithm with alpha=1/K, eta=1/K, tau_0=1024, kappa=0.7 olda = onlineldavb.OnlineLDA(vocab, K, D, 1./K, 1./K, 1024., 0.7) # Run until we've seen D documents. (Feel free to interrupt *much* # sooner than this.) for iteration in range(0, documentstoanalyze): # Download some articles (docset, articlenames) = \ wikirandom.get_random_wikipedia_articles(batchsize) # Give them to online LDA (gamma, bound) = olda.update_lambda(docset) # Compute an estimate of held-out perplexity (wordids, wordcts) = onlineldavb.parse_doc_list(docset, olda._vocab) # Arrays for adding batches of data to the DB doc_array = [] doc_term_array = [] for d in range(len(articlenames)): doc_array.append((articlenames[d], docset[d])) # Add a batch of docs to the DB; this is the one DB task that is not in # the separate DB write thread since later tasks depend on having doc ids. # Since writes take so long, this also balaces the two threads time-wise. doc_ids = db.add_docs(doc_array) doc_topic_array = [] for d in range(len(gamma)): doc_size = len(docset[d]) for k in range(len(gamma[d])): doc_topic_array.append((doc_ids[d], k, gamma[d][k], gamma[d][k]/doc_size)) db.add_doc_topics(doc_topic_array) perwordbound = bound * len(docset) / (D * sum(map(sum, wordcts))) print '%d: rho_t = %f, held-out perplexity estimate = %f' % \ (iteration, olda._rhot, numpy.exp(-perwordbound)) # Save lambda, the parameters to the variational distributions # over topics, and gamma, the parameters to the variational # distributions over topic weights for the articles analyzed in # the last iteration. if (iteration % 10 == 0): numpy.savetxt('lambda-%d.dat' % iteration, olda._lambda) numpy.savetxt('gamma-%d.dat' % iteration, gamma) topic_terms_array =[] for topic in range(len(olda._lambda)): lambda_sum = sum(olda._lambda[topic]) for term in range(len(olda._lambda[topic])): topic_terms_array.append((topic, term, olda._lambda[topic][term]/lambda_sum)) db.update_topic_terms(K, topic_terms_array) gc.collect() # probably not necesary, but precautionary for long runs db.print_task_update() db.increment_batch_count() # The DB thread ends only when it has both run out of tasks and it has been # signaled that it will not be recieving any more tasks db.signal_end()
def main(): """ Analyzes scraped pages using scikit-learn.LDA """ # The number of topics K = 10 # no of documents D = 300 n_features = 1000 # Our vocabulary vocab = list(set(file('./vocab').readlines())) W = len(vocab) # Add terms and topics to the DB db.init() db.add_terms(vocab) db.add_topics(K) olda = onlineldavb.OnlineLDA(vocab, K, D, 1./K, 1./K, 1024., 0.7) # grab documents ### Load your scraped pages, re-tokenize, and vectorize result. docset, docnames = [], [] for filename in os.listdir(os.getcwd()): if filename.endswith('.html'): tree = html.parse(filename) try: encoding = tree.xpath('//meta/@charset')[0] except IndexError: encoding = 'utf-8' with open(filename) as page: rawtext = page.read() try: rawtext = rawtext.decode(encoding, errors='backslashreplace') except TypeError: continue # encoding issues, see http://stackoverflow.com/questions/19527279/python-unicode-to-ascii-conversion docset += [clean_html(rawtext)] docnames += [filename[:-5]] if not(len(docset) % 10): print("loaded " + str(len(docset)) + " documents") # Give them to online LDA # Also computes an estimate of held-out perplexity (wordids, wordcts) = onlineldavb.parse_doc_list(docset, olda._vocab) (gamma, bound) = olda.update_lambda(wordids, wordcts) # Arrays for adding batches of data to the DB # doc_array = [] # doc_term_array = [] # for d in range(len(docnames)): # doc_array.append((docnames[d], docset[d])) doc_array = zip(docnames, docset) # Add a batch of docs to the DB; this is the one DB task that is not in # the separate DB write thread since later tasks depend on having doc ids. # Since writes take so long, this also balaces the two threads time-wise. doc_ids = db.add_docs(doc_array) doc_topic_array = [] for d in range(len(gamma)): doc_size = len(docset[d]) for k in range(len(gamma[d])): doc_topic_array.append((doc_ids[d], k, gamma[d][k], gamma[d][k]/doc_size)) db.add_doc_topics(doc_topic_array) perwordbound = bound * len(docset) / (D * sum(map(sum, wordcts))) print '%d: rho_t = %f, held-out perplexity estimate = %f' % \ (1, olda._rhot, numpy.exp(-perwordbound)) # Save lambda, the parameters to the variational distributions # over topics, and gamma, the parameters to the variational # distributions over topic weights for the articles analyzed in # the last iteration. numpy.savetxt('lambda-%d.dat' % 1, olda._lambda) numpy.savetxt('gamma-%d.dat' % 1, gamma) topic_terms_array = [] for topic in range(len(olda._lambda)): lambda_sum = sum(olda._lambda[topic]) for term in range(len(olda._lambda[topic])): topic_terms_array.append((topic, term, olda._lambda[topic][term]/lambda_sum)) db.update_topic_terms(K, topic_terms_array) gc.collect() # probably not necesary, but precautionary for long runs db.print_task_update() # The DB thread ends only when it has both run out of tasks and it has been # signaled that it will not be recieving any more tasks db.increment_batch_count() db.signal_end()