def main():
    log.info('otrrentworker start main in {} environment....'.format(
        config['APPLICATION_ENVIRONMENT']))

    # initiate transmission-daemon
    daemonstarted = True
    if config['APPLICATION_ENVIRONMENT'] not in ['Development']:
        daemonstarted = start_transmission()

    if daemonstarted:
        # schedule workers
        if config['APPLICATION_ENVIRONMENT'] == 'Development':
            schedule.every(1).minutes.do(runetl, config, log)

            # log configuration in debug mode
            for key, value in config.items():
                log.debug('otrrentworker configuration: {} = {!s}'.format(key, value))

        elif config['APPLICATION_ENVIRONMENT'] == 'Test':
            schedule.every(1).minutes.do(runworker, config, log)
            schedule.every(1).hours.do(runetl, config, log)

        else:
            schedule.every(5).minutes.do(runworker, config, log)
            schedule.every().day.at("12:00").do(runetl, config, log)
            schedule.every().day.at("00:00").do(runetl, config, log)

        # run until stop signal
        while not stopsignal:
            schedule.run_pending()
            time.sleep(1)

    # goodbye
    log.info('otrrentworker service terminated. Goodbye!')
def process_pipe_options():
    """Process options from config file to set up beam job execution.

    :rtype: tuple
    :returns: args to set up the job and the PipelineOptions.
    """
    try:
        from config import config
    except ImportError:
        raise ImportError("Please create a config file to run this job")

    parser = argparse.ArgumentParser()
    parser.add_argument('--input', dest='input',
                        help='Input from where to retrieve similarities scores.')
    parser.add_argument('--kind', dest='kind',
                        help='kind to describe datastore entities')
    parser.add_argument('--project', dest='project',
                        help='Name of project where Datastore will be used')
    parser.add_argument('--similarities_cap', dest='sim_cap', type=int,
                        help='How many items and scores are allowed to be exported to DS')

    args = ["--{}={}".format(k, v) for k, v in config.items()]
    args, pipe_args = parser.parse_known_args(args)
    pipe_args.extend(['--project={}'.format(args.project)])
    return (args, PipelineOptions(pipe_args))
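# Minimal stand-alone sketch of the mechanics used above; the config keys and
# values here are illustrative assumptions, not the project's real config file.
# parse_known_args() splits the generated "--key=value" strings into declared
# arguments (args) and everything else (pipe_args), so unrecognized keys flow
# straight through to PipelineOptions.
import argparse

config = {
    'input': 'gs://bucket/similarities.json',  # hypothetical path
    'kind': 'similarity',
    'project': 'my-gcp-project',
    'extra_beam_flag': 'true',                 # unknown to the parser
}

parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--kind')
parser.add_argument('--project')

cli = ["--{}={}".format(k, v) for k, v in config.items()]
args, pipe_args = parser.parse_known_args(cli)
print(args.project)  # 'my-gcp-project'
print(pipe_args)     # ['--extra_beam_flag=true'] -> would be passed to PipelineOptions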
def get_conf_dict():
    conf = {}
    for section in config.sections():
        conf[section] = {}
        for key, value in config.items(section):
            conf[section][key] = value
    return conf
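# Illustrative sketch, assuming `config` is a configparser.ConfigParser; the
# section and option names are made up for the example. Note that configparser
# returns every value as a string.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[database]
host = localhost
port = 5432

[logging]
level = INFO
""")

conf = {section: dict(config.items(section)) for section in config.sections()}
print(conf)
# {'database': {'host': 'localhost', 'port': '5432'}, 'logging': {'level': 'INFO'}}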
import json
import os
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile


def download_js_package(config_file_path):
    with open(config_file_path, 'r') as f:
        config = json.loads(f.read())
    for name, url in config.items():
        print(name, url)
        with urlopen(url) as resp:
            with ZipFile(BytesIO(resp.read())) as zipfile:
                zipfile.extractall(os.path.dirname(config_file_path))
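# Sketch of the config file download_js_package() expects; the package names
# and URLs below are placeholders, not real endpoints. Each value must point
# at a zip archive, and every archive is extracted into the directory that
# contains the config file.
import json

sample = {
    "jquery": "https://example.com/jquery.zip",
    "lodash": "https://example.com/lodash.zip",
}
with open('packages.json', 'w') as f:
    json.dump(sample, f, indent=4)

# download_js_package('packages.json') would then fetch and unpack both.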
def updatePrices(self):
    logging.info("Loading PoE Ninja data")
    for itemType, url in config.items("PoeNinjaApi"):
        self.prices[itemType] = self.getPrices(url)
    self.currencyPrices = self.getCurrencyPrices()
    self.latestupdate = time.time()
    logging.info("Item Price List created")
def testConfig():
    # secret_key = app.config['SECRET_KEY']
    items = config.items()
    print(items)
    for k, v in items:
        print(k)
        print(v)
    repo = current_app.config.get('SQLALCHEMY_MIGRATE_REPO') or "default"
    print("test test")
    return "hello test blueprint==" + repo
def cursor(dbname):
    """
    Return a TableCursor for the given database.

    This could at some point implement connection pooling for threads,
    but as of now, it does not.
    """
    if dbname not in _databases:
        dbmodule = config.get(dbname, "module")
        Handler = DBHANDLER.get(dbmodule, DBBaseConnection)
        conn = Handler(**dict(config.items(dbname)))
        _databases[dbname] = conn
    return _databases[dbname].cursor()
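# Minimal stand-alone sketch of the kwargs expansion used in cursor(); the
# section name and the handler class here are illustrative assumptions.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[mydb]
module = base
host = localhost
port = 5432
""")

class DBBaseConnection:
    def __init__(self, **kwargs):
        # Every option in the [mydb] section arrives as a string keyword.
        self.settings = kwargs

conn = DBBaseConnection(**dict(config.items('mydb')))
print(conn.settings)  # {'module': 'base', 'host': 'localhost', 'port': '5432'}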
def get_symbol_embedding():
    # Exclude num_classes and num_layers from the keyword arguments;
    # num_layers is passed positionally below.
    tmp_config = {}
    for k, v in config.items():
        if k in ('num_classes', 'num_layers'):
            continue
        tmp_config[k] = v
    embedding = eval(config.net_name).get_symbol(config.emb_size, config.num_layers, **tmp_config)
    all_label = mx.symbol.Variable('softmax_label')
    #embedding = mx.symbol.BlockGrad(embedding)
    all_label = mx.symbol.BlockGrad(all_label)
    out_list = [embedding, all_label]
    out = mx.symbol.Group(out_list)
    return out
def main():
    nextrun = datetime.utcnow()
    log.info('otrserver start main....')

    # log configuration in debug mode
    if config['APPLICATION_LOG_LEVEL'] == 'DEBUG':
        for key, value in config.items():
            log.debug('otrrentserver configuration: {} = {!s}'.format(key, value))

    # run until stop signal
    while not stopsignal:
        if datetime.utcnow() >= nextrun:
            # run etl
            log.info('run ETL')
            genres = import_otrgenres()

            # loop back 10 days and import
            iterdate = datetime.now().date() - timedelta(days=10)
            startdate = datetime.now().date() - timedelta(days=8)
            enddate = datetime.now().date() - timedelta(days=1)
            while iterdate <= enddate:
                if iterdate < startdate:
                    # housekeeping(iterdate)
                    pass
                else:
                    import_otrepg(iterdate, genres)
                iterdate = iterdate + timedelta(days=1)

            update_toprecordings()
            update_torrents(startdate)

            nextrun = datetime.utcnow() + timedelta(
                seconds=config['APPLICATION_ETL_INTERVAL'])
            log.info('next runtime ETL in {!s} seconds at {!s}'.format(
                config['APPLICATION_ETL_INTERVAL'], nextrun))

    # goodbye
    log.info('otrrentserver main terminated. Goodbye!')
def get_default_args(self, merge=None):
    """
    Create and return a default argument hash. Optionally merge
    the specified dictionary into the result.
    """
    # No backend specific defaults should be here; those values
    # would be defined within the relevant backend module or in
    # the outline configuration file.
    defaults = {}

    # By default all layers are registered. Registered layers show up
    # as discrete layers. Unregistered layers are generally embedded
    # in registered layers.
    defaults["register"] = True

    # The default chunk size.
    defaults["chunk"] = 1

    # A null frame range indicates the event will default to the
    # overall frame range defined in the parent outline.
    defaults["range"] = None

    # Now apply any settings found in the configuration file.
    # These settings override the procedural defaults set in
    # the layer constructor using the default_arg method.
    if config.has_section(self.__class__.__name__):
        for key, value in config.items(self.__class__.__name__):
            defaults[key] = value

    # Now apply user supplied arguments. These arguments override
    # both the defaults and the class configuration file.
    if merge:
        defaults.update(merge)

    return defaults
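# Stand-alone sketch of the three-level precedence implemented above:
# procedural defaults, then the class-named config section, then the
# caller-supplied merge. The section name and keys are illustrative.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[Shell]
chunk = 10
""")

defaults = {"register": True, "chunk": 1, "range": None}
if config.has_section("Shell"):
    for key, value in config.items("Shell"):
        defaults[key] = value           # config file wins over the defaults
defaults.update({"range": "1-100"})     # caller-supplied merge wins over both
print(defaults)  # {'register': True, 'chunk': '10', 'range': '1-100'}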
    exit(2)

HTTPLOGGER = logging.Logger(name="HTTP", level=LOG_LEVEL)
fh = logging.FileHandler(logfile_http)
fh.setFormatter(logging.Formatter(fmt="%(asctime)19.19s: %(message)s"))
HTTPLOGGER.addHandler(fh)

logging.basicConfig(format=LOG_FORMAT, level=LOG_LEVEL, filename=logfile)
#sys.stdout = open(logfile_http, 'a', 1)
#sys.stderr = open(errorlog, 'a', 1)

Database.loadConfig(config)

# Load every handler module that is marked active in the [handlers] section.
handlerClasses = {}
i = 0
for handler, active in config.items('handlers'):
    active = bool(int(active))
    if not active:
        print 'Skipped module:', handler
        continue
    c = handler.capitalize()
    print 'Loading module:', handler
    exec "from clienthandler.%s import %s" % (handler, c)
    handlerClasses[handler] = eval(c)
    handlerClasses[handler].loadConfig(config)
    i = i + 1
print 'Successfully loaded %i modules' % (i)
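# Example [handlers] section driving the loader above; the module names are
# placeholders. A value of 1 activates a module, 0 skips it, and each active
# name must exist as clienthandler.<name> exposing a capitalized class.
#
# [handlers]
# login = 1
# chat = 0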
def _gen_data_config(self):
    # Copy the two-level config mapping into cnf_data, creating each section
    # dict on first use so the nested assignment cannot raise KeyError.
    for key1, value1 in config.items():
        for key2, value2 in value1.items():
            cnf_data.setdefault(key1, {})[key2] = value2
def __init__(self):
    self.config = dict(config.items('mysql'))
    super(Mysql, self).__init__(**self.config)
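# Illustrative sketch of what the constructor above does; the [mysql] keys and
# the base class are assumptions, not the project's real schema. Any option
# name the base class does not accept would raise TypeError at connect time.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[mysql]
host = localhost
user = app
db = appdb
""")

class FakeBase:
    def __init__(self, host=None, user=None, db=None):
        print('connecting to', host, user, db)

FakeBase(**dict(config.items('mysql')))  # keyword args come straight from the section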
base_path = '../runs/{}'.format(config['directory'])
models_path = '{}/models'.format(base_path)
config_path = '{}/config.json'.format(base_path)
train_result_path = '{}/train_result.npy'.format(base_path)
transfer_config_path = '../runs/{}/config.json'.format(config['transfer_from']) if config['transfer_from'] else None
transfer_models_path = '../runs/{}/models'.format(config['transfer_from']) if config['transfer_from'] else None

pathlib.Path(models_path).mkdir(parents=True, exist_ok=True)

# Save the config, and check if a different one exists
if pathlib.Path(config_path).is_file():
    with open(config_path, 'r') as config_file:
        existing_config = json.load(config_file)
    if existing_config != config:
        print('Found an existing config with different values:')
        for k, v in config.items():
            if existing_config[k] != v:
                print('  {}: {} -> {}'.format(k, existing_config[k], v))
        if args.force:
            print('Overwriting with new config...')
        else:
            print('To overwrite and train anyway, run again with --force')
            exit()

with open(config_path, 'w') as config_file:
    json.dump(config, config_file, indent=4, separators=(',', ': '))

# Also load the transfer config if there is one
if transfer_config_path and pathlib.Path(transfer_config_path).is_file():
    with open(transfer_config_path, 'r') as config_file:
        transfer_config = json.load(config_file)
def __init__( self ):
    # Set Pandas to output all columns in the dataframe
    pd.set_option( 'display.max_columns', None )
    pd.set_option( 'display.width', 300 )

    print( '-- Configuration ------------------------' )
    for c in self.default_config:
        isDefined = config.get( c )
        if ( not isDefined ):
            config[ c ] = self.default_config[ c ]

    if ( not config[ 'username' ] or not config[ 'password' ] ):
        print( 'RobinHood credentials not found in config file. Aborting.' )
        exit()

    if ( config[ 'rsi_period' ] > config[ 'moving_average_periods' ][ 'sma_fast' ] ):
        self.min_consecutive_samples = config[ 'rsi_period' ]
    else:
        self.min_consecutive_samples = config[ 'moving_average_periods' ][ 'sma_fast' ]

    for a_key, a_value in config.items():
        if ( a_key == 'username' or a_key == 'password' ):
            continue
        print( a_key.replace( '_', ' ' ).capitalize(), ': ', a_value, sep='' )

    print( '-- End Configuration --------------------' )

    if path.exists( 'orders.pickle' ):
        # Load state
        print( 'Loading previously saved state' )
        with open( 'orders.pickle', 'rb' ) as f:
            self.orders = pickle.load( f )
    else:
        # Start from scratch
        print( 'No state saved, starting from scratch' )

    # Load data points
    if ( path.exists( 'dataframe.pickle' ) ):
        self.data = pd.read_pickle( 'dataframe.pickle' )

        # Only track up to a fixed amount of data points
        self.data = self.data.tail( config[ 'max_data_rows' ] - 1 )
    else:
        # Download historical data from Kraken
        column_names = [ 'timestamp' ]
        for a_robinhood_ticker in config[ 'ticker_list' ].values():
            column_names.append( a_robinhood_ticker )
        self.data = pd.DataFrame( columns = column_names )

        for a_kraken_ticker, a_robinhood_ticker in config[ 'ticker_list' ].items():
            try:
                result = get_json( 'https://api.kraken.com/0/public/OHLC?interval=' + str( config[ 'minutes_between_updates' ] ) + '&pair=' + a_kraken_ticker ).json()
                historical_data = pd.DataFrame( result[ 'result' ][ a_kraken_ticker ] )
                historical_data = historical_data[ [ 0, 1 ] ]

                # Be nice to the Kraken API
                sleep( 3 )
            except:
                print( 'An exception occurred retrieving historical data from Kraken.' )

            # Convert timestamps
            self.data[ 'timestamp' ] = [ datetime.fromtimestamp( x ).strftime( "%Y-%m-%d %H:%M" ) for x in historical_data[ 0 ] ]

            # Copy the data
            self.data[ a_robinhood_ticker ] = [ round( float( x ), 3 ) for x in historical_data[ 1 ] ]

            # Calculate the indicators
            self.data[ a_robinhood_ticker + '_SMA_F' ] = self.data[ a_robinhood_ticker ].shift( 1 ).rolling( window = config[ 'moving_average_periods' ][ 'sma_fast' ] ).mean()
            self.data[ a_robinhood_ticker + '_SMA_S' ] = self.data[ a_robinhood_ticker ].shift( 1 ).rolling( window = config[ 'moving_average_periods' ][ 'sma_slow' ] ).mean()
            self.data[ a_robinhood_ticker + '_RSI' ] = RSI( self.data[ a_robinhood_ticker ].values, timeperiod = config[ 'rsi_period' ] )
            self.data[ a_robinhood_ticker + '_MACD' ], self.data[ a_robinhood_ticker + '_MACD_S' ], macd_hist = MACD( self.data[ a_robinhood_ticker ].values, fastperiod = config[ 'moving_average_periods' ][ 'macd_fast' ], slowperiod = config[ 'moving_average_periods' ][ 'macd_slow' ], signalperiod = config[ 'moving_average_periods' ][ 'macd_signal' ] )

    # Connect to RobinHood
    if ( not config[ 'debug_enabled' ] ):
        try:
            rh_response = rh.login( config[ 'username' ], config[ 'password' ] )
        except:
            print( 'Got exception while attempting to log into RobinHood.' )
            exit()

    # Download RobinHood parameters
    for a_robinhood_ticker in config[ 'ticker_list' ].values():
        if ( not config[ 'debug_enabled' ] ):
            try:
                result = rh.get_crypto_info( a_robinhood_ticker )
                s_inc = result[ 'min_order_quantity_increment' ]
                p_inc = result[ 'min_order_price_increment' ]
            except:
                print( 'Failed to get increments from RobinHood.' )
                exit()
        else:
            s_inc = 0.0001
            p_inc = 0.0001

        self.min_share_increments.update( { a_robinhood_ticker: float( s_inc ) } )
        self.min_price_increments.update( { a_robinhood_ticker: float( p_inc ) } )

    # Initialize the available_cash amount
    self.available_cash = self.get_available_cash()

    print( 'Bot Ready' )

    return
def get_config(self):
    config = {'ratio': self.ratio}
    base_config = super(NoiseLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
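# The merge above is the usual Keras custom-layer pattern: the layer's own
# hyperparameters are appended after the base config so they win on key
# collisions. A quick stand-alone illustration of that ordering (the dict
# values here are made up):
base_config = {'name': 'noise_1', 'trainable': True, 'ratio': 0.1}
config = {'ratio': 0.5}
merged = dict(list(base_config.items()) + list(config.items()))
print(merged['ratio'])  # 0.5 -- later items override earlier ones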
def is_transfer_compatible(config, transfer_config):
    net_params = {k: v for k, v in config.items() if k.startswith('net_')}
    net_params_transfer = {k: v for k, v in transfer_config.items() if k.startswith('net_')}
    return net_params == net_params_transfer
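# Usage sketch with made-up config keys: only the 'net_'-prefixed
# (architecture) entries have to match; training settings may differ.
old = {'net_layers': 4, 'net_width': 128, 'learning_rate': 1e-3}
new = {'net_layers': 4, 'net_width': 128, 'learning_rate': 1e-4}
print(is_transfer_compatible(new, old))  # True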
def get_valve_name_by_addr(addr):
    # Reverse lookup: return the option name in [valves] whose value matches addr.
    for name, valve_addr in config.items('valves'):
        if str(valve_addr) == str(addr):
            return name
    return False
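# Stand-alone sketch of a [valves] section this reverse lookup would search;
# the valve names and addresses are placeholders.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[valves]
kitchen = 0x21
garden = 0x22
""")

# get_valve_name_by_addr('0x22') -> 'garden'
# get_valve_name_by_addr('0x99') -> False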
def get_user():
    # Relies on the option order in the [user] section: username first,
    # then password.
    user = config.items('user')
    return (user[0][1], user[1][1])
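# A less order-dependent variant, assuming the section options are literally
# named 'username' and 'password' (an assumption, not confirmed above):
def get_user_by_name():
    return (config.get('user', 'username'), config.get('user', 'password'))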
def configure(self):
    print("Configuring...")
    for reg, value in config.items():
        self.spi_write(registers.get(reg), value)
    return True
def __init__(self):
    # Set Pandas to output all columns in the dataframe
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', 300)

    print('-- Configuration ------------------------')
    for c in self.default_config:
        isDefined = config.get(c)
        if not isDefined:
            config[c] = self.default_config[c]

    if not config['username'] or not config['password']:
        print('RobinHood credentials not found in config file. Aborting.')
        exit()

    if config['rsi_period'] > config['moving_average_periods']['sma_fast']:
        self.min_consecutive_samples = config['rsi_period']
    else:
        self.min_consecutive_samples = config['moving_average_periods']['sma_fast']

    for a_key, a_value in config.items():
        if a_key == 'username' or a_key == 'password':
            continue
        print(a_key.replace('_', ' ').capitalize(), ': ', a_value, sep='')

    print('-- Init Environment ---------------------')

    # Initialize folders where to store data and charts
    if not path.exists('pickle'):
        makedirs('pickle')
    if not path.exists('charts'):
        makedirs('charts')

    if path.exists('pickle/orders.pickle'):
        # Load state
        print('Loading saved orders')
        with open('pickle/orders.pickle', 'rb') as f:
            self.orders = pickle.load(f)
    else:
        # Start from scratch
        print('No state saved, starting from scratch')

    # Load data points
    if path.exists('pickle/dataframe.pickle'):
        print('Loading saved dataset')
        self.data = pd.read_pickle('pickle/dataframe.pickle')

    # Connect to Robinhood
    if not config['simulate_api_calls']:
        try:
            print('Logging in to Robinhood')
            rh_response = rh.login(config['username'], config['password'])
        except:
            print('Got exception while attempting to log into Robinhood.')
            exit()

    # Download Robinhood parameters
    for a_robinhood_ticker in config['ticker_list'].values():
        if not config['simulate_api_calls']:
            try:
                result = rh.get_crypto_info(a_robinhood_ticker)
                self.min_share_increments.update({
                    a_robinhood_ticker: float(result['min_order_quantity_increment'])
                })
                self.min_price_increments.update({
                    a_robinhood_ticker: float(result['min_order_price_increment'])
                })
            except:
                print('Failed to get increments from RobinHood.')
                exit()
        else:
            self.min_share_increments.update({a_robinhood_ticker: 0.0001})
            self.min_price_increments.update({a_robinhood_ticker: 0.0001})

    # How much cash do we have?
    self.update_available_cash()

    # Install signal handlers
    signal.signal(signal.SIGTERM, self.handle_exit)
    signal.signal(signal.SIGINT, self.handle_exit)

    print('Bot Ready')

    return
from app.servers.forms import ServerStartStopForm
from app.servers.controllers import servers_mod
from app.users.models import Users
from app.users.forms import LoginForm
from app.users.controllers import users_mod
from app.servers.profiles.controllers import profiles_mod
from app.servers.profiles.models import ServersProfiles, ArchiveServersProfiles
from app.servers.easyrsa.controllers import easyrsa_mod
from app.servers.easyrsa.models import EasyRsa, ArchiveEasyRsa
from app.servers.clients.controllers import clients_mod
from app.servers.clients.models import Clients

# init Flask app
app = Flask(__name__)
for key, val in config.items(section='APP', raw=True):
    app.config[key.upper()] = str2bool(val)
csrf = CsrfProtect(app)

# Jinja2 newline to <BR>
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')


@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
    result = u'\n\n'.join(
        u'<p>{}</p>'.format(p.replace('\n', '<br>\n'))
        for p in _paragraph_re.split(escape(value))
    )
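# str2bool is a project helper that is not shown here; a minimal sketch of
# what it plausibly does (the accepted spellings are an assumption):
def str2bool(value):
    text = str(value).strip().lower()
    if text in ('true', 'yes', 'on', '1'):
        return True
    if text in ('false', 'no', 'off', '0'):
        return False
    return value  # pass non-boolean settings through unchanged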