def create_celery_worker_scripts(self):
  """ Creates the task worker python script. It uses
      a configuration file for setup.

  Returns:
    The full path of the worker script.
  """
  header_template = file_io.read(self.HEADER_LOC)
  task_template = file_io.read(self.TASK_LOC)
  header_template = header_template.replace("APP_ID", self._app_id)
  script = header_template.replace("CELERY_CONFIGURATION", self._app_id) + \
    '\n'
  for name, queue in self.queues.iteritems():
    # Celery only handles push queues.
    if not isinstance(queue, PushQueue):
      continue

    # The queue name is used as a function name so replace invalid chars
    queue_name = queue.name.replace('-', '_')
    new_task = task_template.\
      replace("QUEUE_NAME", self.get_queue_function_name(queue_name))
    # For tasks generated by mapreduce, or destined to be run by a module,
    # the hostname may have a prefix that corresponds to a different
    # subdomain.
    # AppScale does not support that type of routing currently, so the main
    # loadbalancer IP/hostname is used here for the execution of a task.
    new_task = new_task.\
      replace("PUBLIC_IP", "\"{}\"".format(self.get_public_ip()))
    script += new_task + '\n'

  worker_file = self.get_celery_worker_script_path(self._app_id)
  file_io.write(worker_file, script)
  return worker_file
def create_celery_worker_scripts(self, input_type):
  """ Creates the task worker python script. It uses
      a configuration file for setup.

  Args:
    input_type: Whether to use the config file or the database queue info.
      Default: config file.
  Returns:
    The full path of the worker script.
  """
  queue_info = self._queue_info_file
  if input_type == self.QUEUE_INFO_DB:
    queue_info = self._queue_info_db

  header_template = file_io.read(self.HEADER_LOC)
  task_template = file_io.read(self.TASK_LOC)
  header_template = header_template.replace("APP_ID", self._app_id)
  script = header_template.replace("CELERY_CONFIGURATION", self._app_id) + '\n'
  for queue in queue_info['queue']:
    queue_name = queue['name']
    # The queue name is used as a function name so replace invalid chars
    queue_name = queue_name.replace('-', '_')
    self.validate_queue_name(queue_name)
    new_task = task_template.replace("QUEUE_NAME",
      self.get_queue_function_name(queue_name))
    script += new_task + '\n'

  worker_file = self.get_celery_worker_script_path(self._app_id)
  file_io.write(worker_file, script)
  return worker_file
def create_config_file(watch, start_cmd, stop_cmd, ports, env_vars={},
                       max_memory=500, syslog_server="", host=None):
  """ Reads in a template file for monit and fills it with the
      correct configuration. The caller is responsible for deleting
      the created file.

  Args:
    watch: A string which identifies this process with monit
    start_cmd: The start command to start the process
    stop_cmd: The stop command to kill the process
    ports: A list of ports that are being watched
    env_vars: The environment variables used when starting the process
    max_memory: An int that names the maximum amount of memory that
      this process is allowed to use (in megabytes) before monit should
      restart it.
    syslog_server: The IP of the remote syslog server to use.
    host: The private IP of a server that runs the appengine role; used for
      reliably detecting a running app server process.
  Returns:
    The name of the created configuration file.
  Raises:
    TypeError with bad argument types
  """
  if not isinstance(watch, str):
    raise TypeError("Expected str")
  if not isinstance(start_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(stop_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(ports, list):
    raise TypeError("Expected list")
  if not isinstance(env_vars, dict):
    raise TypeError("Expected dict")

  env = ""
  for ii in env_vars:
    env += "export " + str(ii) + "=\"" + str(env_vars[ii]) + "\" && "

  # Convert ints to strings for template formatting
  for index, ii in enumerate(ports):
    ports[index] = str(ii)

  # 'WATCH' and 'port' are substituted here as the last two arguments
  # because the template script itself uses {}. If we do not sub for them
  # a key error is raised by template.format().
  template = ""
  for port in ports:
    if syslog_server:
      template = file_io.read(TEMPLATE_LOCATION_SYSLOG)
      template = template.format(watch, start_cmd, stop_cmd, port, env,
        max_memory, syslog_server)
    else:
      template = file_io.read(TEMPLATE_LOCATION)
      template = template.format(watch, start_cmd, stop_cmd, port, env,
        max_memory)

    if host:
      template += "  if failed host {} port {} then restart\n".\
        format(host, port)

    config_file = '{}/appscale-{}-{}.cfg'.\
      format(MONIT_CONFIG_DIR, watch, port)
    file_io.write(config_file, template)

  return
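# Hedged usage sketch (not part of the original module): one way this
# create_config_file() variant might be invoked. The app name, commands,
# ports, and environment values below are made up for illustration, and the
# call only succeeds if TEMPLATE_LOCATION, MONIT_CONFIG_DIR, and file_io are
# already defined/importable in this module.
if __name__ == '__main__':
  # Watch a hypothetical app server on two ports; restart it above 400 MB.
  create_config_file(
    'app___guestbook',
    '/usr/bin/python /var/apps/guestbook/app/start.py',
    '/usr/bin/pkill -f guestbook',
    [20000, 20001],
    env_vars={'APPLICATION_ID': 'guestbook'},
    max_memory=400)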
def __init__(self):
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(dconfig.card_dispenser_pin, GPIO.OUT)
    GPIO.output(dconfig.card_dispenser_pin, 1)
    self.cards_given = file_io.read("card_dispenser_file")
    self.capacity = file_io.read("card_capacity_file")
    if self.capacity == 0:
        self.set_capacity(dconfig.card_dispenser_capacity_default)
    self.card_send_warning = False
def __init__(self):
  """ Constructor. """
  # Allow the client to choose any database node to connect to.
  database_master = file_io.read("/etc/appscale/masters").split()
  database_slaves = file_io.read("/etc/appscale/slaves").split()

  # In a one node deployment, the master is also written in the slaves file,
  # so we have to take out any duplicates.
  self.hosts = list(set(database_master + database_slaves))

  self.port = CASS_DEFAULT_PORT

  server_list = ["{0}:{1}".format(host, self.port) for host in self.hosts]
  self.pool = pycassa.ConnectionPool(keyspace=KEYSPACE,
    server_list=server_list, prefill=False)
def __init__(self):
  """ Constructor. """
  # Allow the client to choose any database node to connect to.
  database_master = file_io.read('/etc/appscale/masters').split()
  database_slaves = file_io.read('/etc/appscale/slaves').split()

  # In a one node deployment, the master is also written in the slaves file,
  # so we have to take out any duplicates.
  self.hosts = list(set(database_master + database_slaves))

  self.cluster = Cluster(self.hosts, protocol_version=2)
  self.session = self.cluster.connect(KEYSPACE)
  self.session.default_consistency_level = ConsistencyLevel.QUORUM
  self.retry_policy = IdempotentRetryPolicy()
def get_public_ip():
  """ Get the public IP of the current machine.

  Returns:
    String containing the public IP of the current machine.
  """
  return file_io.read(constants.PUBLIC_IP_LOC).rstrip()
def writeScores(points, name):
    global listScores
    player = False
    hight = 300
    fontSize = '20'
    listScores = file_io.read("listScores.csv")
    for (score, pName) in listScores.items():
        listScores[score] = pName.replace('+', "")
    if points > 0:
        listScores[points] = " +++++ " + name + " +++++ "
    title(hight, fontSize)
    for (score, pName) in sorted(listScores.items(), reverse=True):
        # pName = pName.replace('+',"")
        hight = hight - 60
        sheldon.penup()
        sheldon.goto(-200, hight)
        sheldon.pendown()
        sheldon.color("black")
        instructions = str(score) + str(pName)
        sheldon.write(instructions, font=("Arial", fontSize, "normal"))
def get_private_ip():
  """ Get the private IP of the current machine.

  Returns:
    String containing the private IP of the current machine.
  """
  return file_io.read(constants.PRIVATE_IP_LOC).rstrip()
def stop_app_instance(app_name, port):
  """ Stops a Google App Engine application process instance on current
      machine.

  Args:
    app_name: Name of application to stop
    port: The port the application is running on
  Returns:
    True on success, False otherwise
  """
  if not misc.is_app_name_valid(app_name):
    logging.error("Unable to kill app process %s on port %d because of "
      "invalid name for application" % (app_name, int(port)))
    return False

  logging.info("Stopping application %s" % app_name)
  watch = "app___" + app_name + "-" + str(port)
  god_result = god_interface.stop(watch)

  # hack: God fails to shutdown processes so we do it via a system command
  # TODO: fix it or find an alternative to god
  pid_file = constants.APP_PID_DIR + app_name + '-' + str(port)
  pid = file_io.read(pid_file)

  if str(port).isdigit():
    if subprocess.call(['kill', '-9', pid]) != 0:
      logging.error("Unable to kill app process %s on port %d with pid %s" % \
        (app_name, int(port), str(pid)))

  file_io.delete(pid_file)

  return god_result
def get_db_master_ip():
  """ Returns the master datastore IP.

  Returns:
    A str, the IP of the datastore master.
  """
  return file_io.read(constants.MASTERS_FILE_LOC).rstrip()
def get_secret():
  """ Get AppScale shared security key for authentication.

  Returns:
    String containing the secret key.
  """
  return file_io.read(constants.SECRET_LOC).rstrip()
def create_keyspaces(replication):
  """ Creates the keyspace which AppScale uses for storing application
      and user data.

  Args:
    replication: Replication factor for Cassandra
  Raises:
    AppScaleBadArg: When args are bad
  """
  if int(replication) <= 0:
    raise dbconstants.AppScaleBadArg("Replication must be greater than zero")

  print "Creating Cassandra Key Spaces"

  # Set this to False to keep data from a previous deployment. Setting it
  # to True will remove previous tables.
  _DROP_TABLES = True

  # TODO use shared library to get constants
  host = file_io.read('/etc/appscale/my_private_ip')

  sysman = system_manager.SystemManager(host + ":" +\
    str(cassandra_interface.CASS_DEFAULT_PORT))

  if _DROP_TABLES:
    try:
      sysman.drop_keyspace(cassandra_interface.KEYSPACE)
    except pycassa.cassandra.ttypes.InvalidRequestException, e:
      pass
def __init__(self):
  """ Constructor. """
  self.host = file_io.read(PRIVATE_IP_FILE_LOC)
  self.conn = thriftclient.ThriftClient(self.host, THRIFT_PORT)
  self.ns = self.conn.namespace_open(NAMESPACE)
def get_login_ip():
  """ Get the public IP of the head node.

  Returns:
    String containing the public IP of the head node.
  """
  return file_io.read(constants.LOGIN_IP_LOC).rstrip()
def __init__(self):
    threading.Thread.__init__(self)
    self.cash_last_pay_time = time.time()
    self.cash_inside = file_io.read("cash_inside_file")
    self.cash_banknotes = file_io.read("cash_banknotes_file")
    self.capacity = file_io.read("cash_capacity_file")
    if self.capacity == 0:
        self.set_capacity(dconfig.money_capacity_default)
    self.price = file_io.read("cash_price_file")
    if self.price == 0:
        self.set_price(dconfig.payment_price_default)
    self.cash_session = 0
    self.accept_money_var = False
    self.money_send_warning = False
    self.ser = serial.Serial(dconfig.money_device, 9600)
    self.initialized = True
def __init__(self, money_acceptor_object, card_dispenser_object):
    self.money_acceptor_object = money_acceptor_object
    self.card_dispenser_object = card_dispenser_object
    self.phone1 = file_io.read("gsm_phone1_file")
    if self.phone1 == 0:
        self.phone1 = dconfig.gsm_phone1_default
        file_io.write("gsm_phone1_file", self.phone1)
    self.phone2 = file_io.read("gsm_phone2_file")
    if self.phone2 == 0:
        self.phone2 = dconfig.gsm_phone2_default
        file_io.write("gsm_phone2_file", self.phone2)
    threading.Thread.__init__(self)
    self.new_ser = serial.Serial(dconfig.gsm_device, 9600)
    self.power_on()
def __init__(self):
  """ Constructor. """
  self.host = file_io.read(
    constants.APPSCALE_HOME + '/.appscale/my_private_ip')
  self.conn = thriftclient.ThriftClient(self.host, THRIFT_PORT)
  self.ns = self.conn.namespace_open(NS)
def get_db_info():
  """ Get information on the database being used.

  Returns:
    A dictionary with database info
  """
  info = file_io.read(constants.DB_INFO_LOC)
  return yaml.load(info)
def get_all_ips():
  """ Get the IPs for all deployment nodes.

  Returns:
    A list of node IPs.
  """
  nodes = file_io.read(constants.ALL_IPS_LOC)
  nodes = nodes.split('\n')
  return filter(None, nodes)
def __init__(self):
  """ Constructor. """
  self.host = file_io.read('/etc/appscale/my_private_ip')
  self.port = CASS_DEFAULT_PORT
  self.pool = pycassa.ConnectionPool(keyspace=KEYSPACE,
    server_list=[self.host + ":" + str(self.port)],
    prefill=False)
def get_connection_string():
  """ Reads from the local FS to get the RabbitMQ location to
      connect to.

  Returns:
    A string representing the location of RabbitMQ.
  """
  rabbitmq_ip = file_io.read(RABBITMQ_LOCATION_FILE)
  return 'amqp://*****:*****@' + rabbitmq_ip + ':' + \
    str(RABBITMQ_PORT) + '//'
def get_db_slave_ips():
  """ Returns the slave datastore IPs.

  Returns:
    A list of IPs of the datastore slaves.
  """
  nodes = file_io.read(constants.SLAVES_FILE_LOC).rstrip()
  nodes = nodes.split('\n')
  if nodes[-1] == '':
    nodes = nodes[:-1]
  return nodes
def get_search_location():
  """ Returns the IP and port of where the search service is running.

  Returns:
    A str, the IP and port in the format: IP:PORT. Empty string if the
    service is not available.
  """
  try:
    return file_io.read(constants.SEARCH_FILE_LOC).rstrip()
  except IOError:
    logging.warning("Search role is not configured.")
    return ""
def get_taskqueue_nodes():
  """ Returns a list of all the taskqueue nodes (including the master).
      Strips off any empty lines.

  Returns:
    A list of taskqueue nodes.
  """
  nodes = file_io.read(constants.TASKQUEUE_NODE_FILE)
  nodes = nodes.split('\n')
  if nodes[-1] == '':
    nodes = nodes[:-1]
  return nodes
def load_queues_from_file(self):
  """ Translates an application's queue configuration file to queue objects.

  Returns:
    A dictionary mapping queue names to Queue objects.
  Raises:
    ValueError: If queue_file is unable to get loaded.
  """
  queue_file = self.get_queue_file_location(self._app_id)
  using_default = False
  try:
    info = file_io.read(queue_file)
    logging.info('Found queue file for {}'.format(self._app_id))
  except IOError:
    logging.info(
      'No queue file found for {}, using default queue'.format(self._app_id))
    info = self.DEFAULT_QUEUE_YAML
    using_default = True

  #TODO handle bad xml/yaml files.
  if queue_file.endswith('yaml') or using_default:
    queue_info = queueinfo.LoadSingleQueue(info).ToDict()
  elif queue_file.endswith('xml'):
    queue_info = self.parse_queue_xml(info)
  else:
    raise ValueError("Unable to load queue information with %s" % queue_file)

  if not queue_info:
    raise ValueError("Queue information with %s not set" % queue_file)

  # We add in the default queue if it's not already in there.
  has_default = False
  if 'queue' not in queue_info or len(queue_info['queue']) == 0:
    queue_info = {'queue' : [{'rate':'5/s', 'name': 'default'}]}

  for queue in queue_info['queue']:
    if queue['name'] == 'default':
      has_default = True
  if not has_default:
    queue_info['queue'].append({'rate':'5/s', 'name': 'default'})

  logging.info('Queue for {}:\n{}'.format(self._app_id, queue_info))

  # Discard the invalid queues.
  queues = {}
  for queue in queue_info['queue']:
    try:
      queues[queue['name']] = Queue(queue)
    except InvalidQueueConfiguration:
      logging.exception('Invalid queue configuration')
  return queues
def create_connection(self):
  """ Creates a connection to HBase's Thrift server on the local node.

  Returns:
    An HBase client object
  """
  host = file_io.read('/etc/appscale/my_private_ip')
  t = TSocket.TSocket(host, THRIFT_PORT)
  t = TTransport.TBufferedTransport(t)
  p = TBinaryProtocol.TBinaryProtocol(t)
  c = Hbase.Client(p)
  t.open()
  return c
def get_zk_locations_string():
  """ Returns the ZooKeeper connection host string.

  Returns:
    A string containing one or more host:port listings, separated by commas.
    The default connection string is returned if the location file could not
    be read.
  """
  try:
    info = file_io.read(constants.ZK_LOCATIONS_JSON_FILE)
    zk_json = json.loads(info)
    return ":2181,".join(zk_json['locations']) + ":2181"
  except IOError, io_error:
    logging.exception(io_error)
    return constants.ZK_DEFAULT_CONNECTION_STR
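# Hedged sketch (not in the original): the shape of the JSON that
# get_zk_locations_string() expects, inferred from the parsing code above.
# The host values are made up for illustration.
_example_zk_json = {"locations": ["10.0.0.1", "10.0.0.2"]}
# ":2181,".join(_example_zk_json['locations']) + ":2181"
#   -> "10.0.0.1:2181,10.0.0.2:2181"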
def __init__(self):
  self.host = file_io.read(constants.APPSCALE_HOME +
    '/.appscale/my_private_ip')
  self.port = DEFAULT_PORT
  self.pool = pycassa.ConnectionPool(keyspace='Keyspace1',
    server_list=[self.host + ":" + str(self.port)],
    prefill=False)
  sys = SystemManager(self.host + ":" + str(DEFAULT_PORT))
  try:
    sys.create_column_family('Keyspace1',
                             SCHEMA_TABLE,
                             comparator_type=UTF8_TYPE)
  except Exception, e:
    print "Exception creating column family: %s" % str(e)
    pass
def get_zk_node_ips():
  """ Returns a list of zookeeper node IPs.

  Returns:
    A list containing the hosts that run zookeeper roles in the current
    AppScale deployment.
  """
  try:
    info = file_io.read(constants.ZK_LOCATIONS_JSON_FILE)
    zk_json = json.loads(info)
    return zk_json['locations']
  except IOError, io_error:
    logging.exception(io_error)
    return []
def create_config_file(watch, start_cmd, stop_cmd, ports, env_vars={},
                       max_memory=500):
  """ Reads in a template file for monit and fills it with the
      correct configuration. The caller is responsible for deleting
      the created file.

  Args:
    watch: A string which identifies this process with monit
    start_cmd: The start command to start the process
    stop_cmd: The stop command to kill the process
    ports: A list of ports that are being watched
    env_vars: The environment variables used when starting the process
    max_memory: An int that names the maximum amount of memory that
      this process is allowed to use (in megabytes) before monit should
      restart it.
  Returns:
    The name of the created configuration file.
  Raises:
    TypeError with bad argument types
  """
  if not isinstance(watch, str):
    raise TypeError("Expected str")
  if not isinstance(start_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(stop_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(ports, list):
    raise TypeError("Expected list")
  if not isinstance(env_vars, dict):
    raise TypeError("Expected dict")

  template = file_io.read(TEMPLATE_LOCATION)

  env = ""
  for ii in env_vars:
    env += "export " + str(ii) + "=\"" + str(env_vars[ii]) + "\" && "

  # Convert ints to strings for template formatting
  for index, ii in enumerate(ports):
    ports[index] = str(ii)

  # 'WATCH' and 'port' are substituted here as the last two arguments
  # because the template script itself uses {}. If we do not sub for them
  # a key error is raised by template.format().
  for port in ports:
    template = template.format(watch, start_cmd, stop_cmd, port, env,
      max_memory)
    temp_file_name = "/etc/monit/conf.d/" + watch + '-' + \
      str(port) + ".cfg"
    file_io.write(temp_file_name, template)

  return
def create_config_file(watch, start_cmd, stop_cmd, ports, env_vars={}):
  """ Reads in a template file for god and fills it with the
      correct configuration. The caller is responsible for deleting
      the created file.

  Args:
    watch: A string which identifies this process with god
    start_cmd: The start command to start the process
    stop_cmd: The stop command to kill the process
    ports: A list of ports that are being watched
    env_vars: The environment variables used when starting the process
  Returns:
    The name of the created configuration file.
  Raises:
    TypeError with bad argument types
  """
  if not isinstance(watch, str):
    raise TypeError("Expected str")
  if not isinstance(start_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(stop_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(ports, list):
    raise TypeError("Expected list")
  if not isinstance(env_vars, dict):
    raise TypeError("Expected dict")

  template = file_io.read(TEMPLATE_LOCATION)

  env = ""
  for ii in env_vars:
    env += " \"" + str(ii) + "\" => \"" + str(env_vars[ii]) + "\",\n"
  if env:
    env = "w.env = {" + env + "}"

  # Convert ints to strings for template formatting
  for index, ii in enumerate(ports):
    ports[index] = str(ii)

  # 'WATCH' and 'port' are substituted here as the last two arguments
  # because the template script itself uses {}. If we do not sub for them
  # a key error is raised by template.format().
  template = template.format(watch, start_cmd, stop_cmd, ', '.join(ports),
    env, "{WATCH}", "{port}")

  temp_file_name = "/tmp/god-" + watch + '-' + \
    str(random.randint(0, 9999999)) + ".conf"
  file_io.write(temp_file_name, template)

  return temp_file_name
def load_queues_from_file(self, app_id):
  """ Parses the queue.yaml or queue.xml file of an application and loads
      it into the class.

  Args:
    app_id: The application ID.
  Returns:
    A dictionary of the queue settings.
  Raises:
    ValueError: If queue_file is unable to get loaded.
  """
  queue_file = self.get_queue_file_location(app_id)
  info = ""
  using_default = False
  try:
    info = file_io.read(queue_file)
    logging.info("Found queue file for app {0}".format(app_id))
  except IOError:
    logging.info("No queue file found for app {0}, using default queue" \
      .format(app_id))
    info = self.DEFAULT_QUEUE_YAML
    using_default = True

  queue_info = ""

  #TODO handle bad xml/yaml files.
  if queue_file.endswith('yaml') or using_default:
    queue_info = queueinfo.LoadSingleQueue(info).ToDict()
  elif queue_file.endswith('xml'):
    queue_info = self.parse_queue_xml(info)
  else:
    raise ValueError("Unable to load queue information with %s" % queue_file)

  if not queue_info:
    raise ValueError("Queue information with %s not set" % queue_file)

  # We add in the default queue if it's not already in there.
  has_default = False
  if 'queue' not in queue_info or len(queue_info['queue']) == 0:
    queue_info = {'queue' : [{'rate':'5/s', 'name': 'default'}]}

  for queue in queue_info['queue']:
    if queue['name'] == 'default':
      has_default = True
  if not has_default:
    queue_info['queue'].append({'rate':'5/s', 'name': 'default'})

  self._queue_info_file = queue_info
  logging.info("AppID {0} -- Loaded queue {1}".format(app_id, queue_info))
  return queue_info
def __init__(self):
  self.host = file_io.read(constants.APPSCALE_HOME +
    '/.appscale/my_private_ip')
  self.port = DEFAULT_PORT
  self.pool = pycassa.ConnectionPool(
    keyspace='Keyspace1',
    server_list=[self.host + ":" + str(self.port)],
    prefill=False)
  sys = SystemManager(self.host + ":" + str(DEFAULT_PORT))
  try:
    sys.create_column_family('Keyspace1',
                             SCHEMA_TABLE,
                             comparator_type=UTF8_TYPE)
  except Exception, e:
    print "Exception creating column family: %s" % str(e)
    pass
def plot_param_to_epochs(files, is_log=False):
    names = []
    for f in files:
        loaded_dict = read(f)
        algo_name = loaded_dict['algo_name']
        optional_err_desc = f" for max_err={loaded_dict['max_err']}" \
            if algo_name.find('ADALINE') == 0 else ''
        names.append(algo_name + optional_err_desc)
        x_name = loaded_dict['params_name']
        title = f"simulation {x_name}"
        plt.plot(loaded_dict['params'], loaded_dict['avg_epochs'])
        # x_list = loaded_dict['params']
        # x_stick = [[-x, x] for x in x_list]
        # plt.xticks(x_list, x_stick)
    plt.ylim(1, 100)
    if is_log:
        plt.yscale('log')
    set_plt_data(plt, title, names, x_name)
def __init__(self, persist=False, logs_path=None, request_data=None):
  """Initializer.

  Args:
    persist: For backwards compatibility. Has no effect.
    logs_path: A str containing the filename to use for logs storage.
      Defaults to in-memory if unset.
    request_data: A apiproxy_stub.RequestData instance used to look up state
      associated with the request that generated an API call.
  """
  super(LogServiceStub, self).__init__('logservice',
                                       request_data=request_data)
  self._pending_requests = defaultdict(logging_capnp.RequestLog.new_message)
  self._pending_requests_applogs = dict()
  self._log_server = defaultdict(Queue)
  # Get the head node private IP from /etc/appscale/head_node_private_ip.
  self._log_server_ip = file_io.read(
    "/etc/appscale/head_node_private_ip").rstrip()
def execute(filepath):
    valid, payload = jsonutil.load_and_validate_payload(
        schemas, request.Request.OUT)
    if valid is False:
        return -1

    print(
        json.dumps({
            "version": {
                "version": file_io.read(
                    filepath, jsonutil.get_params_value(payload, "version"))
            }
        }))
    return 0
def create_keyspaces(replication):
  """ Creates the keyspace which AppScale uses for storing application
      and user data.

  Args:
    replication: Replication factor for Cassandra
  Raises:
    AppScaleBadArg: When args are bad
  """
  if int(replication) <= 0:
    raise dbconstants.AppScaleBadArg("Replication must be greater than zero")

  print "Creating Cassandra Key Spaces"

  # TODO use shared library to get constants
  host = file_io.read('/etc/appscale/my_private_ip')

  sysman = system_manager.SystemManager(host + ":" +\
    str(cassandra_interface.CASS_DEFAULT_PORT))

  try:
    sysman.create_keyspace(cassandra_interface.KEYSPACE,
      pycassa.SIMPLE_STRATEGY,
      {'replication_factor': str(replication)})

    # This column family is for functional testing.
    sysman.create_column_family(cassandra_interface.KEYSPACE,
      cassandra_interface.STANDARD_COL_FAM,
      comparator_type=system_manager.UTF8_TYPE)

    for table_name in dbconstants.INITIAL_TABLES:
      sysman.create_column_family(cassandra_interface.KEYSPACE,
        table_name,
        comparator_type=system_manager.UTF8_TYPE)

    sysman.close()
  # TODO: Figure out the exact exceptions we're trying to catch in the
  # case where we are doing data persistence.
  except Exception, e:
    sysman.close()
    # TODO: Figure out the exact exceptions we're trying to catch in the
    # case where we are doing data persistence.
    print "Received an exception of type " + str(e.__class__) +\
      " with message: " + str(e)
def kill_app_instances_for_app(app_name):
  """ Kills all instances of a Google App Engine application on this machine.

  Args:
    app_name: The application ID corresponding to the app to kill.
  Returns:
    A list of the process IDs whose instances were terminated.
  """
  pid_files = glob.glob(constants.APP_PID_DIR + app_name + '-*')
  pids_killed = []
  for pid_file in pid_files:
    pid = file_io.read(pid_file)
    if subprocess.call(['kill', '-9', pid]) == 0:
      pids_killed.append(pid)
    else:
      logging.error("Unable to kill app process %s with pid %s" % \
        (app_name, str(pid)))
  return pids_killed
def load_queues_from_file(self):
  """ Translates an application's queue configuration file to queue objects.

  Returns:
    A dictionary mapping queue names to Queue objects.
  Raises:
    ValueError: If queue_file is unable to get loaded.
  """
  using_default = False
  queue_file = ''

  try:
    queue_file = self.get_queue_file_location(self._app_id)
    try:
      info = file_io.read(queue_file)
      logger.info('Found queue file for {} in: {}'.
        format(self._app_id, queue_file))
    except IOError:
      logger.error(
        'No queue file found for {}, using default queue'.format(self._app_id))
      info = self.DEFAULT_QUEUE_YAML
      using_default = True
  except apiproxy_errors.ApplicationError as application_error:
    logger.error(application_error.message)
    info = self.DEFAULT_QUEUE_YAML
    using_default = True

  #TODO handle bad xml/yaml files.
  if queue_file.endswith('yaml') or using_default:
    queue_info = queueinfo.LoadSingleQueue(info).ToDict()
  elif queue_file.endswith('xml'):
    queue_info = self.parse_queue_xml(info)
  else:
    raise ValueError("Unable to load queue information with %s" % queue_file)

  if not queue_info:
    raise ValueError("Queue information with %s not set" % queue_file)

  # We add in the default queue if it's not already in there.
  has_default = False
  if 'queue' not in queue_info or len(queue_info['queue']) == 0:
    queue_info = {'queue' : [{'rate':'5/s', 'name': 'default'}]}

  for queue in queue_info['queue']:
    if queue['name'] == 'default':
      has_default = True
  if not has_default:
    queue_info['queue'].append({'rate':'5/s', 'name': 'default'})

  logger.info('Queue for {}:\n{}'.format(self._app_id, queue_info))

  # Discard the invalid queues.
  queues = {}
  for queue in queue_info['queue']:
    if 'mode' in queue and queue['mode'] == 'pull':
      try:
        queues[queue['name']] = PullQueue(queue, self._app_id,
                                          self.db_access)
      except InvalidQueueConfiguration:
        logger.exception('Invalid queue configuration')
    else:
      try:
        queues[queue['name']] = PushQueue(queue, self._app_id)
      except InvalidQueueConfiguration:
        logger.exception('Invalid queue configuration')
  return queues
if len(sys.argv) < 3:
    print("Please include parameter file and API file")

param_file = sys.argv[1]
key_file = sys.argv[2]

program_start = int(time.time())

subreddits = file_io.read_parameters(param_file)[1]
k, src, dst, ext = file_io.read_parameters(param_file)[5:]
k = k * 1000

language.set_API_key(key_file)

for sub in subreddits:
    sub_data = []
    input_file = src + '/' + sub.lower() + '_' + src + '.json'
    comment_data = file_io.read(input_file)["data"]
    output_file = dst + '/' + sub.lower() + '_' + dst + '.json'
    file_io.set_output_file(output_file)

    c = 0  # number of api calls
    d = 0  # number of successful api calls
    st = 0
    end = len(comment_data)

    if ext:
        old_data = file_io.read(output_file)
        sub_data = old_data['data']
        last_record = sub_data[-1]
        for j in range(k):
            if comment_data[j]['permalink'] == last_record['permalink']:
                st = j + 1
# gather data on stopwords and symbols to be ignored
from nltk.corpus import stopwords
stopwords.words('english')
stop_words = set(stopwords.words('english'))
punctuation = list(string.punctuation)
lemmatizer = WordNetLemmatizer()
syn = file_io.load_dict('syn.txt')
slurs = file_io.load_slurs('slurs')

for sub in subreddits:
    sub_data = []
    word_count = {}
    input_file = src + '/' + sub.lower() + '_' + src + '.json'
    comment_data = file_io.read(input_file)["data"]
    output_file = src + '/' + sub.lower() + '_' + src + '_attack.json'
    file_io.set_output_file(output_file)
    slur_count_file = src + '/' + sub.lower() + '_' + src + '_tf.txt'

    for datum in comment_data:
        comment = datum['body']
        # tokenize listing, remove unwanted elements
        tokens = re.findall(r"[\w']+", comment)
        filtered_data = [w for w in tokens if not (w in stop_words
                         or w in punctuation or w.isnumeric())]
        lemmatized_data = []
        for i in range(len(filtered_data)):
            tok = tokens[i].lower()
            tok = tok.replace(',', '')
            if tok in syn:
import file_io
import sys
import os

if len(sys.argv) < 2:
    print("Please include parameter file")

param_file = sys.argv[1]
subreddits = file_io.read_parameters(param_file)[1]
src = file_io.read_parameters(param_file)[6]
count = []

subreddits = [x.lower() for x in subreddits]
subreddits.sort()

for sub in subreddits:
    del_count = 0
    input_file = src + '/' + sub.lower() + '_' + src + '.json'
    data = file_io.read(input_file)['data']
    for datum in data:
        if datum['body'] == '[removed]':
            del_count += 1
    count.append("{:15s}{:10,d}\n".format(sub, del_count))

with open(src + '/' + src + '_rem_count.txt', 'w', encoding="utf8") as f:
    f.writelines(count)
import secrets
import hashlib
import file_io
from user import User
from password_generator import generate_password

user_login = input('What is your login? \n')
user_password = input('What is your main password? \n')

user = User(user_login, user_password)
file_io.read(user)

with_upper_case = True
keep_looping = True

while keep_looping:
    service = input('For what service do you want a password? \n')
    service_login = input('What is your login for said service? \n')
    service_digits = int(
        input('How many digits do you want for your password? (8-16) \n'))
    generated_password = generate_password(service_digits, with_upper_case)
    user.add_service_account(service, service_login, generated_password)
    print(user.service_accounts)
    loop = input('Do you want to add another service? [Y/N]')
def create_config_file(watch, start_cmd, stop_cmd, ports, env_vars={},
                       max_memory=500, syslog_server="", host=None,
                       upgrade_flag=False, match_cmd=""):
  """ Reads in a template file for monit and fills it with the
      correct configuration. The caller is responsible for deleting
      the created file.

  Args:
    watch: A string which identifies this process with monit
    start_cmd: The start command to start the process
    stop_cmd: The stop command to kill the process
    ports: A list of ports that are being watched
    env_vars: The environment variables used when starting the process
    max_memory: An int that names the maximum amount of memory that
      this process is allowed to use (in megabytes) before monit should
      restart it.
    syslog_server: The IP of the remote syslog server to use.
    host: The private IP of a server that runs the appengine role; used for
      reliably detecting a running app server process.
    upgrade_flag: If True, use the upgrade template instead of the default
      one.
    match_cmd: The command string used to match the process when the upgrade
      template is used.
  Returns:
    The name of the created configuration file.
  Raises:
    TypeError with bad argument types
  """
  if not isinstance(watch, str):
    raise TypeError("Expected str")
  if not isinstance(start_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(stop_cmd, str):
    raise TypeError("Expected str")
  if not isinstance(ports, list):
    raise TypeError("Expected list")
  if not isinstance(env_vars, dict):
    raise TypeError("Expected dict")

  env = ""
  for ii in env_vars:
    env += "export " + str(ii) + "=\"" + str(env_vars[ii]) + "\" && "

  # Convert ints to strings for template formatting
  for index, ii in enumerate(ports):
    ports[index] = str(ii)

  # 'WATCH' and 'port' are substituted here as the last two arguments
  # because the template script itself uses {}. If we do not sub for them
  # a key error is raised by template.format().
  template = ""
  for port in ports:
    if syslog_server:
      template = file_io.read(TEMPLATE_LOCATION_SYSLOG)
      template = template.format(watch, start_cmd, stop_cmd, port, env,
        max_memory, syslog_server)
    else:
      if upgrade_flag:
        template = file_io.read(TEMPLATE_LOCATION_FOR_UPGRADE)
        template = template.format(watch=watch, start=start_cmd,
          stop=stop_cmd, port=port, match=match_cmd, env=env,
          memory=max_memory)
      else:
        template = file_io.read(TEMPLATE_LOCATION)
        template = template.format(watch, start_cmd, stop_cmd, port, env,
          max_memory)

    if host:
      template += "  if failed host {} port {} then restart\n".\
        format(host, port)

    config_file = '{}/appscale-{}-{}.cfg'.\
      format(MONIT_CONFIG_DIR, watch, port)
    file_io.write(config_file, template)

  return