def _fake_sensors_cron():
    """Sends every 0-100 seconds a new fake sensor event.

    Logs into the local web service, registers example sensors (temperature,
    humidity, a keep-alive sensor and one more whose id is unused), then loops
    forever emitting random temperature/humidity events plus occasional
    keep-alive messages.
    NOTE(review): never returns; presumably run on a background cron thread.
    """
    # Give the web service time to come up before logging in.
    sleep(10)
    print "Logging in for fake sensors"
    r = requests.post("http://localhost:5000/login", data={"username": "******", "password": "******"})
    # Keep only the session cookie (text before the first ';') for reuse below.
    cookie = r.headers["Set-Cookie"]
    cookie = cookie[: cookie.index(";")]
    headers = {"Cookie": cookie}
    print "Login complete. Registering sensors"
    with open("./examples/sensor_example", "r") as f:
        j = json.load(f)
    # /register responds with the numeric id of the newly created sensor.
    r = requests.post("http://localhost:5000/register", data=json.dumps(j), headers=headers)
    temp_id = int(r.text)
    # Clone the first example payload into a humidity sensor.
    j["name"] = "Humidity"
    j["type"] = "SHT21"
    j["location"] = "Kitchen"
    j["sensorRef"] = "cronGeneratedSensor"
    r = requests.post("http://localhost:5000/register", data=json.dumps(j), headers=headers)
    hum_id = int(r.text)
    with open("./examples/sensor2_example", "r") as f:
        j = json.load(f)
    r = requests.post("http://localhost:5000/register", data=json.dumps(j), headers=headers)
    keep_alive_id = int(r.text)
    with open("./examples/sensor3_example", "r") as f:
        j = json.load(f)
    # Id of this last sensor is never read afterwards.
    r = requests.post("http://localhost:5000/register", data=json.dumps(j), headers=headers)
    print "Registration complete. Generating random data"
    from random import random, randrange
    while True:
        # Random 0-99 second pause between events.
        sleep(int(random() * 100))
        if random() < 0.5:
            # Temperature in [15, 30), sometimes with a .5 fraction.
            temp = randrange(15, 30, 1)
            if random() < 0.5:
                temp += 0.5
            temp = float(temp)
            print "New temperature sensor event, value: " + str(temp)
            send = {"timestamp": get_timestamp(), "sensor_id": temp_id, "value": {"value": temp}}
            print "Response: " + requests.post(
                "http://localhost:5000/event", data=json.dumps(send), headers=headers
            ).text
        else:
            # Humidity in [40, 95) percent.
            hum = randrange(40, 95, 1)
            send = {"timestamp": get_timestamp(), "sensor_id": hum_id, "value": {"value": hum}}
            print "New humidity sensor event, value: " + str(hum)
            print "Response: " + requests.post(
                "http://localhost:5000/event", data=json.dumps(send), headers=headers
            ).text
        # Roughly 30% of iterations also emit a keep-alive ping.
        if random() < 0.3:
            print "New keep alive message for sensor" + str(keep_alive_id)
            send = {"id": keep_alive_id}
            print "Response: " + requests.post(
                "http://localhost:5000/keep_alive", data=json.dumps(send), headers=headers
            ).text
def create_game(request):
    """Handle the game-creation form.

    A GET renders an empty ``CreateGameForm``.  A valid POST stores the chosen
    ruleset id, the UTC start/end timestamps and the player ids (ordered by
    player name) in the session, then redirects to the rule-selection step.
    """
    if request.method == 'POST':
        form = CreateGameForm(request.user, request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            session = request.session
            session['ruleset'] = cleaned['ruleset'].id
            session['start_date'] = get_timestamp(cleaned['start_date'].astimezone(utc))
            session['end_date'] = get_timestamp(cleaned['end_date'].astimezone(utc))
            # Keep the player ids in a stable, name-sorted order.
            ordered_players = sorted(cleaned['players'].all(), key=lambda p: p.name)
            session['players'] = [p.id for p in ordered_players]
            return redirect('select_rules')
    else:
        form = CreateGameForm(request.user)
    return render(request, 'game/create.html', {'form': form, 'rulesets': Ruleset.objects.all()})
def _fake_sensors_cron():
    """Sends every 0-100 seconds a new fake sensor event.

    Single-quote variant of the same generator: logs in, registers example
    sensors, then loops forever pushing random temperature/humidity events and
    occasional keep-alive messages.
    NOTE(review): never returns; presumably run on a background cron thread.
    """
    # Let the web service come up before logging in.
    sleep(10)
    print 'Logging in for fake sensors'
    r = requests.post('http://localhost:5000/login', data={'username': '******', 'password': '******'})
    # Keep only the session cookie (text before the first ';').
    cookie = r.headers['Set-Cookie']
    cookie = cookie[:cookie.index(';')]
    headers = {'Cookie': cookie}
    print 'Login complete. Registering sensors'
    with open('./examples/sensor_example', 'r') as f:
        j = json.load(f)
    # /register answers with the numeric id of the new sensor.
    r = requests.post('http://localhost:5000/register', data=json.dumps(j), headers=headers)
    temp_id = int(r.text)
    # Reuse the same payload, adjusted, as a humidity sensor.
    j['name'] = 'Humidity'
    j['type'] = 'SHT21'
    j['location'] = "Kitchen"
    j['sensorRef']="cronGeneratedSensor"
    r = requests.post('http://localhost:5000/register', data=json.dumps(j), headers=headers)
    hum_id = int(r.text)
    with open('./examples/sensor2_example', 'r') as f:
        j = json.load(f)
    r = requests.post('http://localhost:5000/register', data=json.dumps(j), headers=headers)
    keep_alive_id = int(r.text)
    with open('./examples/sensor3_example', 'r') as f:
        j = json.load(f)
    # Id of this last sensor is not used below.
    r = requests.post('http://localhost:5000/register', data=json.dumps(j), headers=headers)
    print 'Registration complete. Generating random data'
    from random import random, randrange
    while True:
        # Random 0-99 second pause between events.
        sleep(int(random() * 100))
        if random() < 0.5:
            # Temperature in [15, 30), sometimes with a .5 fraction.
            temp = randrange(15, 30, 1)
            if random() < 0.5:
                temp += 0.5
            temp = float(temp)
            print 'New temperature sensor event, value: ' + str(temp)
            send = {'timestamp': get_timestamp(), 'sensor_id': temp_id, 'value': {'value': temp}}
            print 'Response: ' + requests.post('http://localhost:5000/event', data=json.dumps(send), headers=headers).text
        else:
            # Humidity in [40, 95) percent.
            hum = randrange(40, 95, 1)
            send = {'timestamp': get_timestamp(), 'sensor_id': hum_id, 'value': {'value': hum}}
            print 'New humidity sensor event, value: ' + str(hum)
            print 'Response: ' + requests.post('http://localhost:5000/event', data=json.dumps(send), headers=headers).text
        # Roughly 30% of iterations also emit a keep-alive ping.
        if random()<0.3:
            print 'New keep alive message for sensor' + str(keep_alive_id)
            send={'id':keep_alive_id}
            print 'Response: '+requests.post("http://localhost:5000/keep_alive", data=json.dumps(send),headers=headers).text
def _append_sensor_event(event):
    """Collects the given sensor event. Then, if an activity is inferred,
    anomalies are checked and the social network is updated."""
    print 'Reasoning activities from sensor events'
    # Feed the event to the reasoner; it may or may not yield an activity.
    reasoner.feed(event)
    activity = reasoner.infer_activity()
    if not activity is None:
        print 'Sensor data generated a new activity: ' + activity.is_a.name
        # Ask the chain model whether this activity looks anomalous.
        anomaly = model.check_anomaly(activity.is_a.name)
        if anomaly is None:
            # If new activity inferred and no anomaly detected, send it to the social network
            print 'No anomaly detected, storing data and sending to social network'
            model.new_sensor_event(activity.is_a.name, activity.timestamp)
            db.session.add(activity)
            db.session.commit()
            if app.config['UPDATE-SOCIAL-NETWORK']:
                url = 'http://www.sonopa.com/network/newactivity'  # TODO Set correct URL
                json_obj = dict()
                json_obj['user_id'] = config['id']
                json_obj['activity'] = {'id': activity.is_a.id, 'name': activity.is_a.name}
                # Activity wall-clock time converted to the API timestamp format.
                json_obj['timestamp'] = get_timestamp(mktime(activity.timestamp.timetuple()))
                # response = _send_json_post(url, json_obj)  # TODO Uncomment and handle response
        else:
            print 'Anomaly detected, sending alert to social network'
            try:
                db.session.add(models.Anomaly.from_chain_model(anomaly))
            except ValueError as e:
                # Conversion failed: log and skip persisting/notifying (py2 .message).
                print e.message
            else:
                # Only commit and notify when the anomaly row was added cleanly.
                db.session.commit()
                if app.config['UPDATE-SOCIAL-NETWORK']:
                    url = 'http://www.sonopa.com/network/anomaly'  # TODO Set correct URL
                    json_obj = dict()
                    json_obj['user_id'] = config['id']
                    json_obj['activity'] = {'id': anomaly.activity_id, 'name': anomaly.activity_name}
                    json_obj['timestamp'] = get_timestamp(mktime(anomaly.timestamp.timetuple()))
                    json_obj['anomaly_id'] = anomaly.id
def post(self):
    """Validate a namespace-creation request and forward it to NEM.

    The JSON body is checked against ``namespace_schema``; on success the
    embedded transaction (if any) is stamped with timestamp, deadline, fees,
    type and version from CONFIG and submitted, otherwise the validator's
    errors are returned.
    """
    nem = NEMConnect()
    validator = Validator(namespace_schema)
    payload = json.loads(request.data)
    if not validator.validate(payload):
        # Schema validation failed: report the collected errors instead.
        return {'Errors': validator.errors}
    txn = payload.get("transaction")
    if txn is not None:
        ns_cfg = CONFIG["CreateNameSpace"]
        ts = utils.get_timestamp()
        txn["timeStamp"] = ts
        txn["deadline"] = utils.get_deadline(ts)
        txn["rentalFee"] = ns_cfg.get("rentalFee")
        txn["fee"] = ns_cfg.get("fee")
        txn["type"] = ns_cfg.get("type")
        txn["version"] = CONFIG.get("version")
    return nem.initiate_transaction(payload)
def make_logger(hp):
    """Prepare the run's log/checkpoint directories and return a root logger.

    Mutates ``hp.log`` in place: the run name is appended to ``chkpt_dir`` and
    ``log_dir`` (both created if missing) and ``log_file_path`` is set to a
    timestamped file inside the log directory.  Log records go to both that
    file and the console.
    """
    log_cfg = hp.log
    # Namespace both directories by the run name.
    log_cfg.chkpt_dir = os.path.join(log_cfg.chkpt_dir, log_cfg.name)
    log_cfg.log_dir = os.path.join(log_cfg.log_dir, log_cfg.name)
    for directory in (log_cfg.chkpt_dir, log_cfg.log_dir):
        os.makedirs(directory, exist_ok=True)
    log_file = "%s-%s.log" % (log_cfg.name, get_timestamp())
    log_cfg.log_file_path = os.path.join(log_cfg.log_dir, log_file)

    # Root logger: INFO level, mirrored to file and stderr.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.FileHandler(log_cfg.log_file_path),
            logging.StreamHandler(),
        ],
    )
    return logging.getLogger()
def handler(event, context):
    """
    Gets information from the Shodan API about all services that have been
    found on the given host IP.
    """
    body = json.loads(event['Records'][0]['body'])
    asset_type = body['type']
    domain_or_ip = body[asset_type]

    # Throttle so consecutive invocations stay inside the API rate limit.
    sleep(DELAY_SECONDS_BETWEEN_API_REQUESTS)
    response = requests.get(
        f'{SHODAN_BASE_URL}/shodan/host/{domain_or_ip}',
        params={'key': SHODAN_API_KEY},
    )
    if response.status_code == 404:
        # Shodan doesn't have data for this IP address!
        # TODO: request a scan for this IP using the Shodan API,
        # and reschedule data collection after 24 hours (or so)
        print('No data available for this IP address')
        return
    # raise an exception if the status code was 4xx or 5xx:
    response.raise_for_status()

    results = response.json()
    print('Collected results')

    timestamp = get_timestamp()
    s3_key = f'shodan/{domain_or_ip}_{timestamp}.json'
    save_json_in_s3(results, s3_key, S3_BUCKET_COLLECTORS_STORAGE)
    print(f'Saved results in S3 at {s3_key}')

    # signal the analyzers to analyze the collected data:
    sns_topic_arn = os.environ['SNSTopicProcessShodanDataARN']
    message = json.dumps({
        'type': asset_type,
        'domain_or_ip': domain_or_ip,
        's3_key': s3_key,
    })
    sns.publish(TopicArn=sns_topic_arn, Message=message)
    print(f'Published SNS message to {sns_topic_arn}')
def setUpClass(cls):
    """Build the shared fixtures: admin credentials, a future event and its meta."""
    print('-----' + cls.__name__ + '-----')
    super().setUpClass()

    # Credentials reused for both the staff and the admin alias.
    cls.email = '*****@*****.**'
    cls.password = '******'
    cls.alias = 'kalonji'
    cls.admin_data = {
        'staff_alias': cls.alias,
        'admin_alias': cls.alias,
        'email': cls.email,
        'password': cls.password,
    }

    # Event dated ~200 days ahead so it is always in the future when tests run.
    cls.name = 'Some Event Name'
    cls.date = get_timestamp().replace(second=0) + timedelta(days=200)
    cls.event_slug = 'some-event-name_' + str(cls.date.year)[2:]
    cls.model_meta = {
        'type': 'Underwater Rugby',
        'grade': 'Elite',
        'age_group': 0,
        'gender': 'Co-ed Teams',
    }
    # The form payload mirrors the model meta but carries the display value.
    cls.meta_form = dict(cls.model_meta, age_group='Open')
    cls.team_list = []
def post(self):
    """Stamp a multisig transaction with config-driven fields and submit it to NEM.

    Both the outer transaction and, when present, its wrapped ``otherTrans``
    receive timestamp/deadline/fee/type/version values before the payload is
    forwarded to the NEM node.
    """
    nem = NEMConnect()
    payload = json.loads(request.data)
    txn = payload.get("transaction")
    if txn is not None:
        multisig_cfg = CONFIG["InitiateMultiSignTransaction"]
        ts = utils.get_timestamp()
        txn["timeStamp"] = ts
        txn["deadline"] = utils.get_deadline(ts)
        txn["fee"] = multisig_cfg.get("fee")
        txn["type"] = multisig_cfg.get("type")
        txn["version"] = CONFIG.get("version")
        inner = txn.get("otherTrans")
        if inner is not None:
            # The inner transaction shares the outer timestamp/deadline.
            inner_cfg = multisig_cfg["otherTrans"]
            inner["fee"] = inner_cfg.get("fee")
            inner["type"] = inner_cfg.get("type")
            inner["timeStamp"] = txn["timeStamp"]
            inner["deadline"] = txn["deadline"]
            inner["version"] = CONFIG.get("version")
    response = nem.initiate_transaction(payload)
    return response
def jump_host_disruption(self, sync=None, finish_execution=None):
    """Reboot target nodes through a jump host and report the outcome.

    Picks the jump host by role from the openstack config, writes the victim
    IPs to /tmp/remote_ips for the ansible playbook, then loops until the
    framework signals completion, rebooting via the jump host and recording a
    row per node in the report table.
    NOTE(review): several spans below were redacted in the source (******);
    the credential echo prints are a best-effort reconstruction.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self, "Entering Jump Host Disruption plugin")
    table_name = "Jump host Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(self, table_name,
                            ["VM", "IP", "TimeStamp", "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # py2: dict.keys() is a list, so [0] works; only the first entry is used.
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    print "*"*20
    print input_args_dict
    print "input_args ==>", input_args
    print "host_config ==>", host_config
    nodes_to_be_disrupted = input_args.get('name', [])
    if input_args:
        print "Inpt " + str(input_args)
        role = input_args.get('role', None)
    # jump_hosts = []
    # Last matching node wins; only one jump host is supported here.
    for node in host_config:
        if role in host_config[node].get('role', None):
            jump_host = node
            # jump_hosts.append(node)
    print "###############", jump_host
    node_reboot_command = "reboot -f"
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    # jump host details
    jump_host_ip = host_config.get(node, None).get('ip', None)
    user = host_config.get(node, None).get('user', None)
    password = host_config.get(node, None).get('password', None)
    # TODO - if its more than one jump host
    # Write into txt file to pass via ansible playbook
    f = open('/tmp/remote_ips', 'w+')
    for ip in nodes_to_be_disrupted:
        f.write(ip + '\n')
    f.close()
    while infra.is_execution_completed(self.finish_execution) is False:
        # for node in nodes_to_be_disrupted:
        # node = nodes_to_be_disrupted[0]
        # ip = host_config.get(node, None).get('ip', None)
        # user = host_config.get(node, None).get('user', None)
        # password = host_config.get(node, None).get('password', None)
        ip = node
        # openrc = host_config.get(node, None).get('openrc', None)
        # password = host_config.get(node, None).get('password', None)
        infra.display_on_terminal(self, "Nodes to be disrupted: ",
                                  str(nodes_to_be_disrupted),
                                  " Jump host: ", jump_host_ip)
        infra.display_on_terminal(self, "Executing ", node_reboot_command)
        print "*"*50
        # NOTE(review): prints credentials in clear text on the terminal.
        print "user :", user
        print "password :", password
        print "jump_host_ip :", jump_host_ip
        ret = AnsibleRunner(jump_host_ip, user, password).execute_on_remote()
        print ret
        # parse the output for report
        # node_list = os.walk('/tmp/hainfra').next()[1]
        # output_objs = eval(open('/tmp/hainfra/'+node+'/tmp/output','r').read())
        # NOTE(review): eval() of a fetched file is unsafe if the jump host is
        # not fully trusted; consider ast.literal_eval instead.
        output_objs = eval(open('/tmp/hainfra/output', 'r').read())
        print output_objs
        for results in output_objs:
            error = []
            for (hostname, result) in results['contacted'].items():
                if 'failed' in result:
                    print "%s >>> %s" % (hostname, result['msg'])
                    error = result['msg']
                if error:
                    infra.display_on_terminal(self, "Error ", error, "color=red")
                infra.display_on_terminal(self, "waiting for ", hostname, " to " "come " "online")
                if infra.wait_for_ping(hostname, 240, 5):
                    infra.display_on_terminal(self, "Node ", hostname, " is online", "color=green")
                infra.display_on_terminal(self, "Will sleep for interval ", str(ha_interval))
                #time.sleep(ha_interval)
                if not error:
                    infra.add_table_rows(self, table_name,
                                         [[jump_host_ip, hostname,
                                           utils.get_timestamp(),
                                           HAConstants.OKGREEN + 'Rebooted' + HAConstants.ENDC]])
                else:
                    infra.add_table_rows(self, table_name,
                                         [[jump_host_ip, hostname,
                                           utils.get_timestamp(),
                                           HAConstants.FAIL + str(error) + HAConstants.ENDC]])
    # bring it back to stable state
    '''
    infra.display_on_terminal(self, "Waiting for the node to become stable")
    if infra.wait_for_ping(hostname, 240, 10):
        infra.display_on_terminal(self, "Node ", hostname,
                                  " is in stable state", "color=green")
    '''
    infra.display_on_terminal(self, "Finishing Node Disruption")
def setup(self):
    """Create the default event fixture (start rounded to the minute)."""
    start = get_timestamp().replace(second=0)
    created = self.create_event('My Event', start)
    self.event, self.meta = created
def jump_host_process_disruption(self, sync=None, finish_execution=None):
    """Stop and restart a service on target nodes via a jump host, reporting results.

    Copies a helper script to the jump host, then loops until the framework
    signals completion: stop the process on every target, record the per-host
    outcome, sleep ``ha_interval`` seconds, start it again and record again.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(
        self, "Entering Jump Host Process Disruption plugin")
    table_name = "Jump host Process Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(
        self, table_name,
        ["VM", "Process", "TimeStamp", "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # py2: dict.keys() is a list, so [0] works; only the first entry is used.
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    print "*" * 20
    print input_args_dict
    print "input_args ==>", input_args
    print "host_config ==>", host_config
    nodes_to_be_disrupted = input_args.get('node', [])
    process_name = input_args.get('process_name', [])
    if input_args:
        print "Inpt " + str(input_args)
        role = input_args.get('role', None)
    # jump_hosts = []
    # Last matching node wins; only one jump host is supported here.
    for node in host_config:
        if role in host_config[node].get('role', None):
            jump_host = node
            # jump_hosts.append(node)
    print "###############", process_name
    # node_reboot_command = "reboot -f"
    # process_start_command =
    rhel_stop_command = "systemctl stop " + process_name
    rhel_start_command = "systemctl start " + process_name
    # jump host details
    jump_host_ip = host_config.get(node, None).get('ip', None)
    user = host_config.get(node, None).get('user', None)
    password = host_config.get(node, None).get('password', None)
    # copy necessary file to jump host
    runner = AnsibleRunner(jump_host_ip, user, password)
    infra.display_on_terminal(self, "Copying to ", jump_host_ip)
    runner.copy('jump_host_executor.py', 'scripts/', '/tmp/')
    infra.display_on_terminal(self, "Copied to ", jump_host_ip)
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    # TODO - if its more than one jump host
    # Write into txt file to pass via ansible playbook
    '''
    f = open('/tmp/remote_ips','w+')
    for ip in nodes_to_be_disrupted:
        f.write(ip+'\n')
    f.close()
    '''
    while infra.is_execution_completed(self.finish_execution) is False:
        ip = node
        # openrc = host_config.get(node, None).get('openrc', None)
        # password = host_config.get(node, None).get('password', None)
        infra.display_on_terminal(self, "Nodes to be disrupted: ",
                                  str(nodes_to_be_disrupted),
                                  " Jump host: ", jump_host_ip)
        infra.display_on_terminal(self, "Executing ", rhel_stop_command)
        # ret = AnsibleRunner(jump_host_ip,user,password).execute_on_remote()
        infra.display_on_terminal(self, "Stopping ", process_name)
        # replacing the playbook logic with ansible runner
        # Execute the script on jump host
        ret = runner.shell(
            'python /tmp/jump_host_executor.py "%s" "%s" >>/tmp/output'
            % (nodes_to_be_disrupted, rhel_stop_command))
        print ret
        # Fetching the result to local
        runner.fetch('output', '/tmp/', '/tmp/hainfra/output')
        # Deleting the output file
        runner.shell('rm /tmp/output')
        # parse the output for report
        # NOTE(review): eval() of a fetched file is unsafe if the jump host is
        # not fully trusted; consider ast.literal_eval instead.
        output_objs = eval(open('/tmp/hainfra/output', 'r').read())
        print output_objs
        for results in output_objs:
            error = []
            for (hostname, result) in results['contacted'].items():
                if 'failed' in result:
                    print "%s >>> %s" % (hostname, result['msg'])
                    error = result['msg']
                if error:
                    infra.display_on_terminal(self, "Error ", error, "color=red")
                if not error:
                    infra.add_table_rows(self, table_name, [[
                        hostname, process_name,
                        utils.get_timestamp(),
                        HAConstants.OKGREEN + 'Stopped' + HAConstants.ENDC
                    ]])
                else:
                    infra.add_table_rows(self, table_name, [[
                        hostname, process_name,
                        utils.get_timestamp(),
                        HAConstants.FAIL + str(error) + HAConstants.ENDC
                    ]])
        infra.display_on_terminal(self, "Will sleep for interval ",
                                  str(ha_interval))
        time.sleep(ha_interval)
        infra.display_on_terminal(self, "Starting ", process_name)
        infra.display_on_terminal(self, "Executing ", rhel_start_command)
        ret = runner.shell(
            'python /tmp/jump_host_executor.py "%s" "%s" >>/tmp/output'
            % (nodes_to_be_disrupted, rhel_start_command))
        print ret
        runner.fetch('output', '/tmp/', '/tmp/hainfra/output')
        runner.shell('rm /tmp/output')
        # parse the output for report
        output_objs = eval(open('/tmp/hainfra/output', 'r').read())
        for results in output_objs:
            hostname = results['hostname']
            error = results['error']
            if error:
                infra.display_on_terminal(self, "Error ", error, "color=red")
            if not error:
                infra.add_table_rows(self, table_name, [[
                    hostname, process_name,
                    utils.get_timestamp(),
                    HAConstants.OKGREEN + 'Started' + HAConstants.ENDC
                ]])
            else:
                infra.add_table_rows(self, table_name, [[
                    hostname, process_name,
                    utils.get_timestamp(),
                    HAConstants.FAIL + str(error) + HAConstants.ENDC
                ]])
    infra.display_on_terminal(self, "Finishing Process Disruption")
def func(klass):
    """Register a goal for the white team at the moment this is invoked."""
    now = get_timestamp()
    klass._match.add_white_goal(klass._get_in_game_time, now)
def func(klass):
    """Record the enclosing foul against the white team at the current time."""
    now = get_timestamp()
    klass._match.white_foul(foul, klass._get_in_game_time, now)
def node_disruption(self, sync=None, finish_execution=None):
    """Force-reboot the first node matching the configured role and report it.

    The loop body runs exactly once (``range(1)``); the commented ``while``
    shows the original repeat-until-finished behaviour.
    NOTE(review): a redacted span (******) in the source hid part of the
    credential echo call; the ``user`` argument is a reconstruction.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self, "Entering Node Disruption plugin")
    table_name = "Node Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(self, table_name,
                            ["Node", "IP", "TimeStamp", "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # py2: dict.keys() is a list, so [0] works; only the first entry is used.
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    if input_args:
        print "Inpt " + str(input_args)
        role = input_args.get('role', None)
    # Collect every configured host whose role matches the requested one.
    nodes_to_be_disrupted = []
    for node in host_config:
        if role in host_config[node].get('role', None):
            infra.display_on_terminal(self, node, " will be disrupted ")
            nodes_to_be_disrupted.append(node)
    node_reboot_command = "reboot -f "
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    for i in range(1):
        #while infra.is_execution_completed(self.finish_execution) is False:
        # for node in nodes_to_be_disrupted:
        # Only the first matching node is actually rebooted.
        node = nodes_to_be_disrupted[0]
        ip = host_config.get(node, None).get('ip', None)
        user = host_config.get(node, None).get('user', None)
        password = host_config.get(node, None).get('password', None)
        # NOTE(review): echoes credentials to the terminal.
        infra.display_on_terminal(self, "IP: ", ip, " User: ", user, " Pwd: ", password)
        infra.display_on_terminal(self, "Executing ", node_reboot_command)
        code, out, error = infra.ssh_and_execute_command(ip, user, password,
                                                         node_reboot_command)
        if error:
            infra.display_on_terminal(self, "Error ", error, "color=red")
        infra.display_on_terminal(self, "waiting for ", ip, " to " "come " "online")
        if infra.wait_for_ping(ip, 240, 10):
            infra.display_on_terminal(self, "Node ", ip, " is online", "color=green")
        infra.display_on_terminal(self, "Will sleep for interval ", str(ha_interval))
        #time.sleep(ha_interval)
        infra.add_table_rows(self, table_name,
                             [[node, ip, utils.get_timestamp(),
                               HAConstants.OKGREEN + 'Rebooted' + HAConstants.ENDC]])
        # bring it back to stable state
        infra.display_on_terminal(self, "Waiting for the node to become stable")
        if infra.wait_for_ping(ip, 240, 10):
            infra.display_on_terminal(self, "Node ", ip, " is in stable state", "color=green")
    infra.display_on_terminal(self, "Finishing Node Disruption")
def display_infra_report(show_historical=False):
    """Print the HA infra summary report to the terminal.

    Walks the module-level ``ha_infra_report_tables`` registry and renders a
    PrettyTable per reported table, centred to the terminal width.  Tables
    flagged in ``ha_infra_historical_tables`` are shown only when
    ``show_historical`` is True (and vice versa).
    """
    displayed = False
    # r = rows, c = columns of the current terminal.
    r,c = map(int, get_terminal_rc())
    print
    print
    print "*" * int(c)
    title = "HA Infra Report Summary"
    print HAConstants.HEADER + title.center(int(c))
    print ("Generated on " + utils.get_timestamp(complete_timestamp=True)).center(c) +\
        HAConstants.ENDC
    print "Total Number of Launched Processes : ", \
        get_launched_process_count()
    print "Time Started :", start_run_time
    print "Time Completed :", stop_run_time
    print "*" * int(c)
    for plugin_dir in ha_infra_report_tables:
        print
        tab_title = ("Result Reported by all " + plugin_dir.title()).title()
        print HAConstants.HEADER + tab_title.center(c) + HAConstants.ENDC
        print ("=" * len(tab_title)).center(c)
        ha_infra_repor = [ha_infra_report_tables.get(plugin_dir.title())]
        if len(ha_infra_report_tables.get(plugin_dir.title())) == 0:
            print " -- No Results Reported --".center(c)
            print
        for plugin_tables in ha_infra_repor:
            for plugin_name in plugin_tables:
                pname = ("Plugin Name : " + plugin_name).title()
                print pname
                print "=" * len(pname)
                table_count = 0
                for plugin_table in plugin_tables[plugin_name]:
                    # A bare string entry is a one-off message; show it once.
                    if isinstance(plugin_table, str) and not displayed:
                        print plugin_table
                        displayed = True
                        break
                    for tablename in plugin_table:
                        table_count += 1
                        display = True
                        # Historical tables only show when requested, and
                        # current tables only when historicals are not.
                        historic_table = ha_infra_historical_tables.\
                            get(tablename, False)
                        if historic_table and show_historical:
                            display = True
                        elif not historic_table and show_historical:
                            display = False
                        elif historic_table and not show_historical:
                            display = False
                        elif not historic_table and not show_historical:
                            display = True
                        if display:
                            # Row 0 holds the headers, the rest are data rows.
                            individual_table = plugin_table[tablename]
                            headers = individual_table[0]
                            print
                            t_title = "Table : " + str(table_count) \
                                + " " + tablename
                            print t_title
                            print "-" * len(t_title)
                            report_table = PrettyTable(headers)
                            for header in headers:
                                report_table.align[header] = "l"
                            report_table.padding_width = 3
                            rows = individual_table[1:]
                            for row in rows:
                                report_table.add_row(row)
                            print str(report_table).center(c)
                            print "-" * c
    print "*" * c
    return
def __half_time(self):
    """Mark the half-time break: record its start, pause briefly, record its end."""
    match = self.get_match_object
    start = get_timestamp()
    match.record_start_half(start)
    sleep(1)
    end = get_timestamp()
    match.record_end_half(end)
def func(klass, sec):
    """When the countdown reaches the two-minute mark, stamp it on the match."""
    if sec != klass.two_minute_mark.seconds:
        return
    klass._match.record_two_minute_mark(get_timestamp())
def display_infra_report(show_historical=False):
    """Print the HA infra summary report to the terminal (duplicate variant).

    Same behaviour as the sibling definition: renders a PrettyTable per table
    registered in ``ha_infra_report_tables``, filtering by whether the table
    is historical and whether ``show_historical`` was requested.
    """
    displayed = False
    # r = rows, c = columns of the current terminal.
    r, c = map(int, get_terminal_rc())
    print
    print
    print "*" * int(c)
    title = "HA Infra Report Summary"
    print HAConstants.HEADER + title.center(int(c))
    print ("Generated on " + utils.get_timestamp(complete_timestamp=True)).center(c) +\
        HAConstants.ENDC
    print "Total Number of Launched Processes : ", \
        get_launched_process_count()
    print "Time Started :", start_run_time
    print "Time Completed :", stop_run_time
    print "*" * int(c)
    for plugin_dir in ha_infra_report_tables:
        print
        tab_title = ("Result Reported by all " + plugin_dir.title()).title()
        print HAConstants.HEADER + tab_title.center(c) + HAConstants.ENDC
        print("=" * len(tab_title)).center(c)
        ha_infra_repor = [ha_infra_report_tables.get(plugin_dir.title())]
        if len(ha_infra_report_tables.get(plugin_dir.title())) == 0:
            print " -- No Results Reported --".center(c)
            print
        for plugin_tables in ha_infra_repor:
            for plugin_name in plugin_tables:
                pname = ("Plugin Name : " + plugin_name).title()
                print pname
                print "=" * len(pname)
                table_count = 0
                for plugin_table in plugin_tables[plugin_name]:
                    # A bare string entry is a one-off message; show it once.
                    if isinstance(plugin_table, str) and not displayed:
                        print plugin_table
                        displayed = True
                        break
                    for tablename in plugin_table:
                        table_count += 1
                        display = True
                        # Historical tables only show when requested, and
                        # current tables only when historicals are not.
                        historic_table = ha_infra_historical_tables.\
                            get(tablename, False)
                        if historic_table and show_historical:
                            display = True
                        elif not historic_table and show_historical:
                            display = False
                        elif historic_table and not show_historical:
                            display = False
                        elif not historic_table and not show_historical:
                            display = True
                        if display:
                            # Row 0 holds the headers, the rest are data rows.
                            individual_table = plugin_table[tablename]
                            headers = individual_table[0]
                            print
                            t_title = "Table : " + str(table_count) \
                                + " " + tablename
                            print t_title
                            print "-" * len(t_title)
                            report_table = PrettyTable(headers)
                            for header in headers:
                                report_table.align[header] = "l"
                            report_table.padding_width = 3
                            rows = individual_table[1:]
                            for row in rows:
                                report_table.add_row(row)
                            print str(report_table).center(c)
                            print "-" * c
    print "*" * c
    return
def __start_game_allowed(self):
    """Return True once the current time is past the scheduled start."""
    now = get_timestamp()
    return now > self._match.scheduled
def interrupt_allowed(self):
    """Interrupts are allowed only after the two-minute mark of a started game."""
    if not self._match.did_game_start:
        return False
    threshold = self._match.started + self.two_minute_mark
    return get_timestamp() > threshold
def end_game_allowed(self):
    """The game may end only once the minimum length has elapsed since start."""
    if not self._match.did_game_start:
        return False
    earliest_end = self._match.started + self.minimum_game_length
    return get_timestamp() > earliest_end
import datetime
import os
import logging
import youtube_dl
import unidecode
import uuid
import websockets
import asyncio

from utils import glog
from utils.utils import get_timestamp
from utils.logger import app_logger

# Application working directory, read from the environment at import time.
APP_PATH = os.environ.get('APP_PWD')
# Maximum allowed media duration — presumably seconds; TODO confirm at callers.
DURATION_LIMIT = 600
target_client = None

# Timestamp captured once at import; presumably tags this run's output files.
st = get_timestamp(time_format='%Y-%m-%d_%H-%M')


async def notify(payload):
    """Send *payload* (stringified and UTF-8 encoded) to the socket server at SOCKET_URI."""
    async with websockets.connect(os.environ.get('SOCKET_URI')) as websocket:
        await websocket.send(str(payload).encode())


def progress_hook(d):
    # NOTE(review): this definition is truncated at the edge of the visible
    # chunk; everything below is only its visible prefix.
    progress_client_id = 'progress-' + str(uuid.uuid4())
    try:
        tb = int(d['total_bytes'])
    except KeyError:
        # youtube-dl only supplies an estimate until the real size is known.
        tb = int(d['total_bytes_estimate'])
    payload = {
        'name': progress_client_id,
def run(self):
    """ Actual execution starts here.

    Iterates over the configured executor blocks, honouring per-block options
    (``repeat``, ``sync``, ``mode``, ``ha_interval``, ``ha_start_delay``,
    ``disruption_count``, ``timer``), launches the step threads and renders
    the infra report after each repetition.  Cleans up named pipes and
    restores PROMPT_COMMAND on the way out.
    """
    # Exit if the executor is not defined.
    execute = self.executor_data.get('executors', None)
    if execute is None:
        LOG.critical('Nothing to run')
        ha_infra.ha_exit(0)
    self.executor_threads = []
    # clean up the xterm paths
    if os.path.exists(self.infra_path):
        shutil.rmtree(self.infra_path)
    ha_infra.start_run_time = \
        utils.get_timestamp(complete_timestamp=True)
    user_env_pc = None
    if os.environ.get('PROMPT_COMMAND', None):
        # save the PROMPT_COMMAND to set xterm title for now
        user_env_pc = os.environ['PROMPT_COMMAND']
        del os.environ['PROMPT_COMMAND']
    for executor_index, executor_block in enumerate(execute):
        # Check whether the executor block needs to be repeated
        # process the repeat commandi
        if not executor_block:
            # An empty block marks the end of the run.
            ha_infra.stop_run_time = \
                utils.get_timestamp(complete_timestamp=True)
            LOG.info("******** Completing the execution ******** ")
            ha_infra.ha_exit(0)
        parallel = False
        repeat_count = 1
        LOG.info('Executing %s' % str(executor_index+1))
        if 'repeat' in executor_block:
            repeat_count = executor_block.get('repeat', 1)
            executor_block.pop('repeat')
        use_sync = False
        if 'sync' in executor_block:
            LOG.info("Sync is requested within the block")
            use_sync = executor_block.get('sync', False)
            LOG.info("Use Sync %s", use_sync)
        ha_interval = None
        ha_start_delay = None
        if 'ha_interval' in executor_block:
            ha_interval = executor_block.get('ha_interval', None)
        if 'ha_start_delay' in executor_block:
            ha_start_delay = executor_block.get('ha_start_delay', None)
        disruption_count = 1
        if 'disruption_count' in executor_block:
            disruption_count = executor_block.get('disruption_count', None)
        LOG.info("Block will be repeated %s times", repeat_count)
        # Repeat count in each steps
        for i in range(repeat_count):
            LOG.info("******** Block Execution Count %s ******** ", str(i+1))
            # process the mdoe command
            if 'mode' in executor_block:
                # if mode is parallel set parllel flag
                if executor_block['mode'].lower() == 'parallel':
                    LOG.info('starting thread')
                    parallel = True
                elif executor_block['mode'].lower() == 'sequence':
                    LOG.info('sequential execution')
                else:
                    LOG.critical('Unsupported mode , '
                                 'must be either '
                                 '"parallel" or "sequence"')
                    ha_infra.ha_exit(0)
                executor_block.pop('mode')
            # process the timer command
            if 'timer' in executor_block:
                # TODO: pradeech
                LOG.info('Timer....')
                executor_block.pop('timer')
            try:
                # Execute the command and the respective parameters
                del self.executor_threads[:]
                # py2: iteritems(); remaining keys are step-action -> nodes.
                for step_action, nodes in executor_block.iteritems():
                    launched_process = 0
                    ha_infra.set_launched_process_count(launched_process)
                    self.execute_the_block(executor_index, nodes,
                                           step_action, ha_interval,
                                           ha_start_delay, disruption_count,
                                           parallel=parallel,
                                           use_sync=use_sync)
                if self.executor_threads:
                    # start all the executor threads
                    [t.start() for t in self.executor_threads]
                    [t.join() for t in self.executor_threads]
                ha_infra.display_infra_report()
            except NotImplementedError as runerror:
                # NOTE(review): '%s - %s' % runerror passes the exception as
                # the sole format argument and step_action as a second call
                # argument — this log call itself raises; likely intended
                # ('... %s - %s' % (runerror, step_action)).
                LOG.critical('Unable to execute %s - %s' % runerror,
                             step_action)
                ha_infra.ha_exit(0)
            except Exception as runerror:
                LOG.critical('Unable to continue execution %s'
                             % str(runerror))
                ha_infra.ha_exit(0)
    LOG.info("******** Completing the executions ******** ")
    ha_infra.stop_run_time = \
        utils.get_timestamp(complete_timestamp=True)
    # clean up all the pipes
    for f in self.open_pipes:
        try:
            os.unlink(f)
        except:
            pass
    # restore the env variables
    if user_env_pc:
        os.environ['PROMPT_COMMAND'] = user_env_pc
def process_disruption(self, sync=None, finish_execution=None):
    """Repeatedly stop and restart a systemd-managed process on every
    controller node over SSH, recording each action in a report table.

    :param sync: optional notification handle; when set, execution waits
                 on it before starting the disruption loop.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    table_name = "Process Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(
        self, table_name,
        ["Host", "Process", "TimeStamp", "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    if input_args:
        print "Inpt " + str(input_args)
    process_name = input_args.get('process_name', None)
    role = input_args.get('role', None)
    # NOTE(review): 'type' shadows the builtin and, like 'role', is never
    # used below.
    type = input_args.get('type', None)
    infra.display_on_terminal(self, "Process ", process_name,
                              " will be disrupted")
    # Every node whose role contains 'controller' gets disrupted, and its
    # expected failure is registered so reporting can account for it.
    nodes_to_be_disrupted = []
    for node in host_config:
        if 'controller' in host_config[node].get('role', None):
            infra.display_on_terminal(self, node, " will be disrupted ")
            nodes_to_be_disrupted.append(node)
            self.expected_failures.append(node + "::" + process_name)
    self.set_expected_failures(self.expected_failures)
    rhel_stop_command = "systemctl stop " + process_name
    rhel_start_command = "systemctl start " + process_name
    if sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    disruption_count = self.get_disruption_count()
    infra.display_on_terminal(self, "Process will be disrupted ",
                              str(disruption_count))
    # Loop until the framework signals completion; only the first
    # disruption_count passes actually touch the nodes.
    while infra.is_execution_completed(self.finish_execution) is False:
        if disruption_count:
            disruption_count = disruption_count - 1
            for node in nodes_to_be_disrupted:
                ip = host_config.get(node, None).get('ip', None)
                user = host_config.get(node, None).get('user', None)
                password = host_config.get(node, None).get('password',
                                                           None)
                # NOTE(review): this statement was corrupted by
                # credential masking ("..."******"...") in the scraped
                # source; originally it printed user alongside ip/password.
                infra.display_on_terminal(self, "IP: ", ip,
                                          " User: "******" Pwd: ",
                                          password)
                infra.display_on_terminal(self, "Stopping ", process_name)
                infra.display_on_terminal(self, "Executing ",
                                          rhel_stop_command)
                code, out, error = infra.ssh_and_execute_command(
                    ip, user, password, rhel_stop_command)
                infra.add_table_rows(self, table_name, [[
                    ip, process_name,
                    utils.get_timestamp(),
                    HAConstants.WARNING + 'Stopped' + HAConstants.ENDC
                ]])
                infra.display_on_terminal(self, "Sleeping for interval ",
                                          str(ha_interval), " seconds")
                time.sleep(ha_interval)
                infra.display_on_terminal(self, "Starting ", process_name)
                infra.display_on_terminal(self, "Executing ",
                                          rhel_start_command)
                code, out, error = infra.ssh_and_execute_command(
                    ip, user, password, rhel_start_command)
                time.sleep(ha_interval)
                infra.add_table_rows(self, table_name, [[
                    ip, process_name,
                    utils.get_timestamp(),
                    HAConstants.OKGREEN + 'Started' + HAConstants.ENDC
                ]])

    # bring it back to stable state
    # (uses ip/user/password from the last loop iteration)
    infra.display_on_terminal(self, "Bringing the process to stable state")
    infra.display_on_terminal(self, "Executing ", rhel_start_command)
    code, out, error = infra.ssh_and_execute_command(
        ip, user, password, rhel_start_command)
    infra.display_on_terminal(self, "Finishing Process Disruption")
def __half_time(self):
    """Record a half-time interval on the match: mark its start,
    wait out the break, mark its end, then persist the match."""
    match = self._match
    match.record_start_half(get_timestamp())
    # Half-time break lasts three minutes.
    sleep(180)
    match.record_end_half(get_timestamp())
    match.save()
def container_disruption(self, sync=None, finish_execution=None):
    """Stop/start a container's systemd unit on one controller node,
    logging every action into a report table.

    :param sync: optional notification handle to wait on before starting.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self, "Entering Container Disruption plugin")
    table_name = "Container Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(self, table_name,
                            ["Host", "Container Process",
                             "Timestamp", "Status of Disruption"])
    input_args_dict = self.get_input_arguments()
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    if input_args:
        print "Inpt " + str(input_args)
    container_name = input_args.get('container_name', None)
    role = input_args.get('role', None)
    # 'infinite' means: stop once and leave the container down.
    disruption_type = input_args.get('disruption', None)
    infra.display_on_terminal(self, "Container ", container_name,
                              " will be disrupted")
    nodes_to_be_disrupted = []
    for node in host_config:
        if 'controller' in host_config[node].get('role', None):
            infra.display_on_terminal(self, node, " will be disrupted ")
            nodes_to_be_disrupted.append(node)
            # For now disrupt on only one node
            break

    # Deprecate process disruptor and converge on this for both cases later
    container_stop_command = "systemctl stop " + container_name
    container_start_command = "systemctl start " + container_name

    ha_start_delay = self.get_ha_start_delay()
    if sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")

    # Start the actual disruption after 45 seconds
    time.sleep(ha_start_delay)
    ha_interval = self.get_ha_interval()
    disruption_count = self.get_disruption_count()
    if disruption_type == 'infinite':
        #Override the disruption count in executor.yaml
        disruption_count = 1
    while infra.is_execution_completed(self.finish_execution) is False:
        if disruption_count:
            disruption_count = disruption_count - 1
            for node in nodes_to_be_disrupted:
                ip = host_config.get(node, None).get('ip', None)
                user = host_config.get(node, None).get('user', None)
                password = host_config.get(node, None).get('password',
                                                           None)
                infra.display_on_terminal(self, "Stopping ",
                                          container_name)
                infra.display_on_terminal(self, "Executing ",
                                          container_stop_command)
                code, out, error = infra.ssh_and_execute_command(
                    ip, user, password, container_stop_command)
                infra.add_table_rows(self, table_name,
                                     [[ip, container_name,
                                       utils.get_timestamp(),
                                       HAConstants.WARNING + 'Stopped' +
                                       HAConstants.ENDC]])
                if disruption_type == 'infinite':
                    # Leave the container stopped; the operator restores it.
                    infra.display_on_terminal(
                        self,
                        "Infinite disruption chosen bring up container "
                        "manually")
                    break
                infra.display_on_terminal(self, "Sleeping for interval ",
                                          str(ha_interval), " seconds")
                time.sleep(ha_interval)
                infra.display_on_terminal(self, "Starting ",
                                          container_name)
                infra.display_on_terminal(self, "Executing ",
                                          container_start_command)
                code, out, error = infra.ssh_and_execute_command(
                    ip, user, password, container_start_command)
                time.sleep(ha_interval)
                infra.add_table_rows(self, table_name,
                                     [[ip, container_name,
                                       utils.get_timestamp(),
                                       HAConstants.OKGREEN + 'Started' +
                                       HAConstants.ENDC]])

    # bring it back to stable state
    # (uses ip/user/password from the last loop iteration)
    if disruption_type != 'infinite':
        infra.display_on_terminal(self,
                                  "Bringing the container to stable state")
        infra.display_on_terminal(self, "Executing ",
                                  container_start_command)
        code, out, error = infra.ssh_and_execute_command(
            ip, user, password, container_start_command)
    infra.display_on_terminal(self, "Finishing Container Disruption")
def update_source_xml(self):
    # type: () -> None
    """Write translated units back into the original Android strings XML.

    Reads and normalizes the source file, inserts new <string> nodes for
    translated units that are missing (preceded by an import comment),
    updates the text of existing nodes, then overwrites the source file.
    Escaping: units whose target text contains placeholders are escaped,
    others are unescaped (the final pass unescapes placeholder-free
    strings in the serialized output).
    """
    # Idiom fix: use a context manager instead of manual open/close so the
    # handle is released even if read/parse raises.
    with open(self.original_file_path, 'r', encoding='utf-8') as f_stream:
        str_content = f_stream.read()
    normalized_xml = normalize_xml_file_content(str_content)
    xml_root = etree.fromstring(normalized_xml)
    # Translated units plus untranslated ones that still carry target text.
    units_to_update = self.translation_units + [
        u for u in self.untranslated if u.target_text != ''
    ]
    # First pass: decide whether any new node will be added, so the
    # import comment is written at most once.
    should_add_comment = False
    for t_unit in units_to_update:
        xml_search_query = './/string[@name=\"{}\"]'.format(
            t_unit.identifier)
        xml_t_unit_node = xml_root.find(xml_search_query)
        if xml_t_unit_node is None and t_unit.is_translated():
            should_add_comment = True
            break
    if should_add_comment:
        comment_text = ' IMPORTED FROM GOOGLE SHEETS ({})'.format(
            get_timestamp())
        comment_node = etree.Comment(comment_text)
        comment_node.tail = '\n\t'
        xml_root.append(comment_node)
    # Second pass: append missing nodes / update existing ones.
    for t_unit in units_to_update:
        xml_search_query = './/string[@name=\"{}\"]'.format(
            t_unit.identifier)
        xml_t_unit_node = xml_root.find(xml_search_query)
        if xml_t_unit_node is None and t_unit.is_translated():
            string_node = etree.Element('string')
            string_node.set('name', t_unit.identifier)
            string_node.tail = '\n\t'
            string_node.text = escape_xml_characters(t_unit.target_text)
            xml_root.append(string_node)
        elif xml_t_unit_node is not None:
            if string_has_placeholders(t_unit.target_text):
                xml_t_unit_node.text = escape_xml_characters(
                    t_unit.target_text)
            else:
                xml_t_unit_node.text = unescape_xml_characters(
                    t_unit.target_text)
    xml_string_content = etree.tostring(
        xml_root, pretty_print=True, encoding='utf-8',
        xml_declaration=True).decode('utf-8')
    # Indent the added comment (if any)
    if should_add_comment:
        xml_string_content = xml_string_content.replace(
            '\n<!--', '\n\n\t<!--')
    # Unescape the strings that have no placeholders
    xml_string_content = AndroidXmlFile.unescape_xml_string_content(
        xml_string_content)
    # Overwrite the source file
    with open(self.original_file_path, 'w',
              encoding='utf-8') as out_file:
        out_file.write(xml_string_content)
import yaml
import sys
import os

# Make the project root(s) importable before pulling in utils.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                '..'))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                '../..'))

from utils.utils import get_timestamp

sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                '..'))

# Module-level config object: load the experiment YAML and derive fields.
with open('configs/configs_10.yaml') as f:
    # Security/deprecation fix: yaml.load without an explicit Loader is
    # unsafe (can execute arbitrary Python tags) and warns on modern
    # PyYAML; the config file only needs plain scalars/maps.
    configs = yaml.safe_load(f)

# Normal vectors add 3 extra input channels on top of xyz.
configs['num_channel'] = 6 if configs['use_normal'] else 3
# configs['git_hash'] = get_git_hash()
configs['timestamp'] = get_timestamp()

if __name__ == "__main__":
    print(configs)
def start(self, sync=None, finish_execution=None, args=None): ''' Required start method to implement for the class. ''' # Parse user data and Initialize. self.finish_execution = finish_execution data = self.get_input_arguments() self.loglevel = data['ansible'].get("loglevel", "DEBUG") self.frequency = data['ansible'].get('frequency', 5) self.max_hist_size = data['ansible'].get('max_hist', 25) self.dockerized = data['ansible'].get('dockerized', False) global LOG LOG = infra.ha_logging(__name__, level=self.loglevel) print "ANSIBLE LOG LEVEL: ", self.loglevel LOG.debug("User data: %s", data) # Get MariaDB Username/pass self.mariadb_user = None self.mariadb_password = None mariadb_info = data['ansible'].get('mariadb', None) if mariadb_info is not None: self.mariadb_user = data['ansible']['mariadb'].get('user', None) self.mariadb_password = data['ansible']['mariadb'].get('password', None) self.ansirunner = None setup_file = "../../configs/openstack_config.yaml" self.ansiresults = collections.deque(maxlen=self.max_hist_size) self.inventory = ConfigHelper(host_file=setup_file) LOG.debug("parsed data: ", self.inventory.parsed_data) host_list = self.inventory.get_host_list() host_ip_list = self.inventory.get_host_ip_list() control_ip_list = self.inventory.get_host_ip_list(role='controller') compute_ip_list = self.inventory.get_host_ip_list(role='compute') remote_user = self.inventory.get_host_username(host_list[0]) LOG.debug("Inventory: [all: %s], [control: %s] [compute: %s]", host_ip_list, control_ip_list, compute_ip_list) LOG.debug("Remote user: "******"Waiting for Runner Notification") infra.wait_for_notification(sync) infra.display_on_terminal(self, "Received notification from Runner") while infra.is_execution_completed(self.finish_execution) is False: #################################################### # Ansible Monitoring Loop. 
#################################################### ts_results = [] ts = utils.get_timestamp(complete_timestamp=True) ts_results.append({'name': 'ts', 'ts': ts}) msg = "=" * 50 + "\n" + "Timestamp: " + ts infra.display_on_terminal(self, msg) # Ping and SSH Check. host_ip_list = self.inventory.get_host_ip_list() ansi_results = self.ansible_ssh_ping_check(host_ip_list, remote_user) ts_results.append(ansi_results) # Process check. for service in SERVICE_LIST: host_ip_list = self.inventory.get_host_ip_list(role=service['role']) ansi_results = self.ansible_check_process(host_ip_list, remote_user, service['service']) ts_results.append(ansi_results) # RabbitMQ Check. host_ip_list = self.inventory.get_host_ip_list(role='controller') ansi_results = self.ansible_check_rabbitmq(host_ip_list, remote_user) ts_results.append(ansi_results) # MariaDB Check. ansi_results = self.ansible_check_mariadb(host_ip_list, remote_user) ts_results.append(ansi_results) # Add the ts results to main result list. self.ansiresults.append(ts_results) time.sleep(self.frequency) # Generate Summary Reports self.display_ansible_summary_report() self.display_asible_process_report() infra.display_infra_report() self.generate_graphs_output()
def jump_host_process_disruption(self, sync=None, finish_execution=None):
    """Disrupt a process on remote nodes by driving a helper script
    (jump_host_executor.py) through a jump host via AnsibleRunner,
    fetching its output back for report rows.

    :param sync: optional notification handle to wait on before starting.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self,
                              "Entering Jump Host Process Disruption "
                              "plugin")
    table_name = "Jump host Process Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(self, table_name,
                            ["VM", "Process", "TimeStamp",
                             "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    print "*"*20
    print input_args_dict
    print "input_args ==>",input_args
    print "host_config ==>",host_config
    nodes_to_be_disrupted = input_args.get('node',[])
    process_name = input_args.get('process_name',[])
    if input_args:
        print "Inpt " + str(input_args)
    role = input_args.get('role', None)
    # jump_hosts = []
    # Last matching host wins as the jump host.
    for node in host_config:
        if role in host_config[node].get('role', None):
            jump_host = node
            # jump_hosts.append(node)
    print "###############",process_name
    # node_reboot_command = "reboot -f"
    # process_start_command =
    rhel_stop_command = "systemctl stop " + process_name
    rhel_start_command = "systemctl start " + process_name
    # jump host details (node holds the last host iterated above)
    jump_host_ip = host_config.get(node, None).get('ip', None)
    user = host_config.get(node,None).get('user',None)
    password = host_config.get(node,None).get('password',None)
    # copy necessary file to jump host
    runner = AnsibleRunner(jump_host_ip,user,password)
    infra.display_on_terminal(self, "Copying to ", jump_host_ip)
    runner.copy('jump_host_executor.py','scripts/','/tmp/')
    infra.display_on_terminal(self, "Copied to ", jump_host_ip)
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    #TODO - if its more than one jump host
    # Write into txt file to pass via ansible playbook
    '''
    f = open('/tmp/remote_ips','w+')
    for ip in nodes_to_be_disrupted:
        f.write(ip+'\n')
    f.close()
    '''
    while infra.is_execution_completed(self.finish_execution) is False:
        ip = node
        # openrc = host_config.get(node, None).get('openrc', None)
        # password = host_config.get(node, None).get('password', None)
        infra.display_on_terminal(self, "Nodes to be disrupted: ",
                                  str(nodes_to_be_disrupted),
                                  " Jump host: ", jump_host_ip)
        infra.display_on_terminal(self, "Executing ", rhel_stop_command)
        # ret = AnsibleRunner(jump_host_ip,user,password).execute_on_remote()
        infra.display_on_terminal(self, "Stopping ", process_name)
        # replacing the playbook logic with ansible runner
        # Execute the script on jump host
        ret = runner.shell(
            'python /tmp/jump_host_executor.py "%s" "%s" '
            '>>/tmp/output'%(nodes_to_be_disrupted,rhel_stop_command))
        print ret
        # Fetching the result to local
        runner.fetch('output','/tmp/','/tmp/hainfra/output')
        # Deleting the output file
        runner.shell('rm /tmp/output')
        # parse the output for report
        # NOTE(review): eval() on file content fetched from a remote host
        # executes arbitrary code — should be ast.literal_eval or json.
        output_objs = eval(open('/tmp/hainfra/output','r').read())
        print output_objs
        for results in output_objs:
            error = []
            for (hostname, result) in results['contacted'].items():
                if 'failed' in result:
                    print "%s >>> %s" % (hostname, result['msg'])
                    error = result['msg']
            if error:
                infra.display_on_terminal(self, "Error ", error,
                                          "color=red")
            if not error:
                infra.add_table_rows(self, table_name,
                                     [[hostname, process_name,
                                       utils.get_timestamp(),
                                       HAConstants.OKGREEN + 'Stopped' +
                                       HAConstants.ENDC]])
            else:
                infra.add_table_rows(self, table_name,
                                     [[hostname, process_name,
                                       utils.get_timestamp(),
                                       HAConstants.FAIL + str(error)+
                                       HAConstants.ENDC]])
        infra.display_on_terminal(self, "Will sleep for interval ",
                                  str(ha_interval))
        time.sleep(ha_interval)
        infra.display_on_terminal(self, "Starting ", process_name)
        infra.display_on_terminal(self, "Executing ", rhel_start_command)
        ret = runner.shell(
            'python /tmp/jump_host_executor.py "%s" "%s" '
            '>>/tmp/output'%(nodes_to_be_disrupted,rhel_start_command))
        print ret
        runner.fetch('output','/tmp/','/tmp/hainfra/output')
        runner.shell('rm /tmp/output')
        # parse the output for report (same eval() concern as above)
        output_objs = eval(open('/tmp/hainfra/output','r').read())
        for results in output_objs:
            hostname = results['hostname']
            error = results['error']
            if error:
                infra.display_on_terminal(self, "Error ", error,
                                          "color=red")
            if not error:
                infra.add_table_rows(self, table_name,
                                     [[hostname, process_name,
                                       utils.get_timestamp(),
                                       HAConstants.OKGREEN + 'Started' +
                                       HAConstants.ENDC]])
            else:
                infra.add_table_rows(self, table_name,
                                     [[hostname, process_name,
                                       utils.get_timestamp(),
                                       HAConstants.FAIL + str(error)+
                                       HAConstants.ENDC]])
    infra.display_on_terminal(self, "Finishing Process Disruption")
def jump_host_disruption(self, sync=None, finish_execution=None):
    """Reboot remote nodes through a jump host: write the target IPs to
    /tmp/remote_ips, run AnsibleRunner.execute_on_remote() on the jump
    host, then parse the fetched output into report rows.

    :param sync: optional notification handle to wait on before starting.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self, "Entering Jump Host Disruption plugin")
    table_name = "Jump host Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(
        self, table_name,
        ["VM", "IP", "TimeStamp", "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    print "*" * 20
    print input_args_dict
    print "input_args ==>", input_args
    print "host_config ==>", host_config
    nodes_to_be_disrupted = input_args.get('name', [])
    if input_args:
        print "Inpt " + str(input_args)
    role = input_args.get('role', None)
    # jump_hosts = []
    # Last matching host wins as the jump host.
    for node in host_config:
        if role in host_config[node].get('role', None):
            jump_host = node
            # jump_hosts.append(node)
    print "###############", jump_host
    node_reboot_command = "reboot -f"
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    # jump host details (node holds the last host iterated above)
    jump_host_ip = host_config.get(node, None).get('ip', None)
    user = host_config.get(node, None).get('user', None)
    password = host_config.get(node, None).get('password', None)
    #TODO - if its more than one jump host
    # Write into txt file to pass via ansible playbook
    f = open('/tmp/remote_ips', 'w+')
    for ip in nodes_to_be_disrupted:
        f.write(ip + '\n')
    f.close()
    while infra.is_execution_completed(self.finish_execution) is False:
        # for node in nodes_to_be_disrupted:
        # node = nodes_to_be_disrupted[0]
        # ip = host_config.get(node, None).get('ip', None)
        # user = host_config.get(node, None).get('user', None)
        # password = host_config.get(node, None).get('password', None)
        ip = node
        # openrc = host_config.get(node, None).get('openrc', None)
        # password = host_config.get(node, None).get('password', None)
        infra.display_on_terminal(self, "Nodes to be disrupted: ",
                                  str(nodes_to_be_disrupted),
                                  " Jump host: ", jump_host_ip)
        infra.display_on_terminal(self, "Executing ", node_reboot_command)
        print "*" * 50
        # NOTE(review): this statement was corrupted by credential
        # masking ("..."******"...") in the scraped source; it originally
        # printed user and password alongside the jump host IP.
        print "user :"******"password :"******"jump_host_ip :", jump_host_ip
        ret = AnsibleRunner(jump_host_ip, user,
                            password).execute_on_remote()
        print ret
        # parse the output for report
        # node_list = os.walk('/tmp/hainfra').next()[1]
        # output_objs = eval(open('/tmp/hainfra/'+node+'/tmp/output','r').read())
        # NOTE(review): eval() on file content fetched from a remote host
        # executes arbitrary code — should be ast.literal_eval or json.
        output_objs = eval(open('/tmp/hainfra/output', 'r').read())
        print output_objs
        for results in output_objs:
            error = []
            for (hostname, result) in results['contacted'].items():
                if 'failed' in result:
                    print "%s >>> %s" % (hostname, result['msg'])
                    error = result['msg']
            if error:
                infra.display_on_terminal(self, "Error ", error,
                                          "color=red")
            infra.display_on_terminal(self, "waiting for ", hostname,
                                      " to "
                                      "come "
                                      "online")
            if infra.wait_for_ping(hostname, 240, 5):
                infra.display_on_terminal(self, "Node ", hostname,
                                          " is online", "color=green")
            infra.display_on_terminal(self, "Will sleep for interval ",
                                      str(ha_interval))
            #time.sleep(ha_interval)
            if not error:
                infra.add_table_rows(self, table_name, [[
                    jump_host_ip, hostname,
                    utils.get_timestamp(),
                    HAConstants.OKGREEN + 'Rebooted' + HAConstants.ENDC
                ]])
            else:
                infra.add_table_rows(self, table_name, [[
                    jump_host_ip, hostname,
                    utils.get_timestamp(),
                    HAConstants.FAIL + str(error) + HAConstants.ENDC
                ]])
    # bring it back to stable state
    '''
    infra.display_on_terminal(self, "Waiting for the node to become stable")
    if infra.wait_for_ping(hostname, 240, 10):
        infra.display_on_terminal(self, "Node ", hostname,
                                  " is in stable state", "color=green")
    '''
    infra.display_on_terminal(self, "Finishing Node Disruption")
def __start_time():
    """Return the timestamp marking the start of the run."""
    timestamp = utils.get_timestamp()
    return timestamp
def process_disruption(self, sync=None, finish_execution=None):
    """Repeatedly stop and restart a systemd-managed process on every
    controller node over SSH, recording each action in a report table.

    :param sync: optional notification handle; when set, execution waits
                 on it before starting the disruption loop.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    table_name = "Process Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(self, table_name,
                            ["Host", "Process", "TimeStamp",
                             "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    if input_args:
        print "Inpt " + str(input_args)
    process_name = input_args.get('process_name', None)
    role = input_args.get('role', None)
    # NOTE(review): 'type' shadows the builtin and, like 'role', is never
    # used below.
    type = input_args.get('type', None)
    infra.display_on_terminal(self, "Process ", process_name,
                              " will be disrupted")
    # Every node whose role contains 'controller' gets disrupted, and its
    # expected failure is registered so reporting can account for it.
    nodes_to_be_disrupted = []
    for node in host_config:
        if 'controller' in host_config[node].get('role', None):
            infra.display_on_terminal(self, node, " will be disrupted ")
            nodes_to_be_disrupted.append(node)
            self.expected_failures.append(node + "::" + process_name)
    self.set_expected_failures(self.expected_failures)
    rhel_stop_command = "systemctl stop " + process_name
    rhel_start_command = "systemctl start " + process_name
    if sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    ha_interval = self.get_ha_interval()
    disruption_count = self.get_disruption_count()
    infra.display_on_terminal(self, "Process will be disrupted " ,
                              str(disruption_count))
    # Loop until the framework signals completion; only the first
    # disruption_count passes actually touch the nodes.
    while infra.is_execution_completed(self.finish_execution) is False:
        if disruption_count:
            disruption_count = disruption_count - 1
            for node in nodes_to_be_disrupted:
                ip = host_config.get(node, None).get('ip', None)
                user = host_config.get(node, None).get('user', None)
                password = host_config.get(node, None).get('password',
                                                           None)
                # NOTE(review): this statement was corrupted by
                # credential masking ("..."******"...") in the scraped
                # source; originally it printed user alongside ip/password.
                infra.display_on_terminal(self, "IP: ", ip,
                                          " User: "******" Pwd: ",
                                          password)
                infra.display_on_terminal(self, "Stopping ", process_name)
                infra.display_on_terminal(self, "Executing ",
                                          rhel_stop_command)
                code, out, error = infra.ssh_and_execute_command(
                    ip, user, password, rhel_stop_command)
                infra.add_table_rows(self, table_name,
                                     [[ip, process_name,
                                       utils.get_timestamp(),
                                       HAConstants.WARNING + 'Stopped' +
                                       HAConstants.ENDC]])
                infra.display_on_terminal(self, "Sleeping for interval ",
                                          str(ha_interval), " seconds")
                time.sleep(ha_interval)
                infra.display_on_terminal(self, "Starting ", process_name)
                infra.display_on_terminal(self, "Executing ",
                                          rhel_start_command)
                code, out, error = infra.ssh_and_execute_command(
                    ip, user, password, rhel_start_command)
                time.sleep(ha_interval)
                infra.add_table_rows(self, table_name,
                                     [[ip, process_name,
                                       utils.get_timestamp(),
                                       HAConstants.OKGREEN + 'Started' +
                                       HAConstants.ENDC]])

    # bring it back to stable state
    # (uses ip/user/password from the last loop iteration)
    infra.display_on_terminal(self, "Bringing the process to stable state")
    infra.display_on_terminal(self, "Executing ", rhel_start_command)
    code, out, error = infra.ssh_and_execute_command(
        ip, user, password, rhel_start_command)
    infra.display_on_terminal(self, "Finishing Process Disruption")
def generate_graphs_output(self):
    '''
    Generate result data in the format required by the chart module.

    Collapses the per-timestamp results in self.ansiresults into runs of
    identical per-process check statuses, then writes them (plus
    start/end markers) to /tmp/ha_infra/ansible_graph.txt as
    "host,process,ts_start,status,comment" CSV-style lines.
    '''
    print "Generate graphs"
    # One result list per monitored service name.
    per_proc_result = {}
    for service in SERVICE_LIST:
        svcname = service['service']
        per_proc_result[svcname] = {}
        per_proc_result[svcname]['reslist'] = []

    # Go through results from all timestamps, and
    # generate the modified data structure.
    for ts_results in self.ansiresults:
        ts = None
        for results in ts_results:
            name = results.get('name', None)
            if name is None:
                continue
            if name == "ts":
                # Timestamp entry applies to the rest of this batch.
                ts = results.get('ts', None)
            if name == "process_check":
                results['ts_start'] = ts
                results['ts_end'] = None
                procname = results['process']
                if len(per_proc_result[procname]['reslist']) == 0:
                    # This is the first time we are adding results
                    per_proc_result[procname]['reslist'].append(results)
                else:
                    # This is not the first time. Check if the new
                    # result is same as the old one.
                    lastidx = len(per_proc_result[procname]['reslist']) - 1
                    cur_res = per_proc_result[procname]['reslist'][lastidx]
                    # If new res is same as old, then we update the
                    # end status.
                    if results['ansi_result']['status'] == \
                            cur_res['ansi_result']['status']:
                        cur_res['ts_end'] = ts
                    else:
                        per_proc_result[procname]['reslist'].append(
                            results)

    # Now copy the data to a file
    ansible_graph_file = "/tmp/ha_infra/ansible_graph.txt"
    #rescount = len(self.ansiresults)
    # First entry of the first batch holds the run's start timestamp.
    test_starttime = self.ansiresults[0][0]['ts']
    # Capture end time.
    test_endtime = utils.get_timestamp(complete_timestamp=True)
    with open(ansible_graph_file, "w") as f:
        data = "starttime##%s\n" % test_starttime
        f.write(data)
        for proc in per_proc_result.keys():
            for result in per_proc_result[proc]['reslist']:
                for host in result['ansi_result']['contacted']:
                    if result['ansi_result']['status'] == "PASS":
                        resval = "OK"
                    else:
                        resval = "CRITICAL"
                    data = "%s,%s,%s,%s,%s\n" % \
                        (host, proc, result['ts_start'], resval,
                         "Service Running")
                    f.write(data)
                    #print "%s: Start time: %s, End Time: %s, Status: %s" % \
                    #    (proc, result['ts_start'], result['ts_end'],
                    #     result['ansi_result']['status'])
        data = "endtime##%s\n" % test_endtime
        f.write(data)
def vm_disruption(self, sync=None, finish_execution=None):
    """Reboot a set of VMs via the Nova API (stop, wait, start), logging
    each reboot to a report table and waiting for the VM to ping again.

    :param sync: optional notification handle to wait on before starting.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self, "Entering VM Disruption plugin")
    table_name = "VM Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(
        self, table_name,
        ["VM", "IP", "TimeStamp", "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    print '========', input_args_dict
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    if input_args:
        print "Inpt " + str(input_args)
    role = input_args.get('role', None)
    print '*' * 20
    print 'host_config ==', host_config
    print 'input_args ==', input_args
    print '*' * 20
    # nodes_to_be_disrupted = []
    # Last host whose role equals the requested role becomes the jump host.
    for node in host_config:
        if role == host_config[node].get('role', None):
            jump_host = node
    print jump_host
    nodes_to_be_disrupted = input_args.get('name')
    print nodes_to_be_disrupted
    node_reboot_command = "reboot"
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    # Nova credentials come from the jump host's openrc/password.
    openrc = host_config.get(jump_host, None).get('openrc', None)
    password = host_config.get(jump_host, None).get('password', None)
    print openrc
    ha_interval = self.get_ha_interval()
    # for i in range(1):
    while infra.is_execution_completed(self.finish_execution) is False:
        for node in nodes_to_be_disrupted:
            # node = nodes_to_be_disrupted[0]
            # ip = host_config.get(node, None).get('ip', None)
            # user = host_config.get(node, None).get('user', None)
            # password = host_config.get(node, None).get('password', None)
            ip = node
            # openrc = host_config.get(node, None).get('openrc', None)
            # password = host_config.get(node, None).get('password', None)
            infra.display_on_terminal(self, "IP: ", ip, " openrc: ",
                                      openrc)
            infra.display_on_terminal(self, "Executing ",
                                      node_reboot_command)
            # Using nova api performing the vm stop operation
            cred = credentials.Credentials(openrc, password, 'no_env')
            try:
                nova = nova_api.NovaHealth(cred.get_nova_credentials_v2())
                ret = nova.nova_stop_server(ip)
                time.sleep(ha_interval)
                infra.display_on_terminal(self, "Rebooting ", ip)
                nova.nova_start_server(ip)
                error = []
            # Python 2 except syntax: any failure is captured in `error`
            # and reported, not raised.
            except Exception, error:
                pass
            if error:
                infra.display_on_terminal(self, "Error ", error,
                                          "color=red")
            infra.display_on_terminal(self, "waiting for ", ip, " to "
                                      "come "
                                      "online")
            if infra.wait_for_ping(ip, 240, 10):
                infra.display_on_terminal(self, "Node ", ip, " is online",
                                          "color=green")
            infra.display_on_terminal(self, "Will sleep for interval ",
                                      str(ha_interval))
            #time.sleep(ha_interval)
            infra.add_table_rows(self, table_name, [[
                node, ip,
                utils.get_timestamp(),
                HAConstants.OKGREEN + 'Rebooted' + HAConstants.ENDC
            ]])
def vm_disruption(self, sync=None, finish_execution=None):
    """Reboot a set of VMs via the Nova API (stop, wait, start), logging
    each reboot to a report table and waiting for the VM to ping again.

    :param sync: optional notification handle to wait on before starting.
    :param finish_execution: handle polled via
                 infra.is_execution_completed() to end the loop.
    """
    self.sync = sync
    self.finish_execution = finish_execution
    infra.display_on_terminal(self, "Entering VM Disruption plugin")
    table_name = "VM Disruption"
    infra.create_report_table(self, table_name)
    infra.add_table_headers(self, table_name,
                            ["VM", "IP", "TimeStamp",
                             "Status of Disruption"])
    infra.display_on_terminal(self, "Entering Process Disruption plugin")
    input_args_dict = self.get_input_arguments()
    print '========',input_args_dict
    # Only the first node's arguments are consumed (Python 2 .keys()[0]).
    node_name = input_args_dict.keys()[0]
    input_args = input_args_dict.get(node_name, None)
    host_config = infra.get_openstack_config()
    if input_args:
        print "Inpt " + str(input_args)
    role = input_args.get('role', None)
    print '*'*20
    print 'host_config ==',host_config
    print 'input_args ==',input_args
    print '*'*20
    # nodes_to_be_disrupted = []
    # Last host whose role equals the requested role becomes the jump host.
    for node in host_config:
        if role == host_config[node].get('role', None):
            jump_host = node
    print jump_host
    nodes_to_be_disrupted = input_args.get('name')
    print nodes_to_be_disrupted
    node_reboot_command = "reboot"
    if self.sync:
        infra.display_on_terminal(self, "Waiting for notification")
        infra.wait_for_notification(sync)
        infra.display_on_terminal(self, "Received notification, Starting")
    # Nova credentials come from the jump host's openrc/password.
    openrc = host_config.get(jump_host, None).get('openrc', None)
    password = host_config.get(jump_host, None).get('password', None)
    print openrc
    ha_interval = self.get_ha_interval()
    # for i in range(1):
    while infra.is_execution_completed(self.finish_execution) is False:
        for node in nodes_to_be_disrupted:
            # node = nodes_to_be_disrupted[0]
            # ip = host_config.get(node, None).get('ip', None)
            # user = host_config.get(node, None).get('user', None)
            # password = host_config.get(node, None).get('password', None)
            ip = node
            # openrc = host_config.get(node, None).get('openrc', None)
            # password = host_config.get(node, None).get('password', None)
            infra.display_on_terminal(self, "IP: ", ip, " openrc: ",
                                      openrc)
            infra.display_on_terminal(self, "Executing ",
                                      node_reboot_command)
            # Using nova api performing the vm stop operation
            cred = credentials.Credentials(openrc, password,'no_env')
            try:
                nova = nova_api.NovaHealth(cred.get_nova_credentials_v2())
                ret = nova.nova_stop_server(ip)
                time.sleep(ha_interval)
                infra.display_on_terminal(self, "Rebooting ",ip)
                nova.nova_start_server(ip)
                error = []
            # Python 2 except syntax: any failure is captured in `error`
            # and reported, not raised.
            except Exception,error:
                pass
            if error:
                infra.display_on_terminal(self, "Error ", error,
                                          "color=red")
            infra.display_on_terminal(self, "waiting for ", ip, " to "
                                      "come "
                                      "online")
            if infra.wait_for_ping(ip, 240, 10):
                infra.display_on_terminal(self, "Node ", ip, " is online",
                                          "color=green")
            infra.display_on_terminal(self, "Will sleep for interval ",
                                      str(ha_interval))
            #time.sleep(ha_interval)
            infra.add_table_rows(self, table_name,
                                 [[node, ip,
                                   utils.get_timestamp(),
                                   HAConstants.OKGREEN + 'Rebooted' +
                                   HAConstants.ENDC]])
def start_game(self):
    """Prepare the match fixture, record the kickoff timestamp on the
    match metadata, and return that timestamp."""
    referee = certify_referee(self.ref_data)
    self.setup('Some Event', 'A', referee, timedelta(minutes=-2))
    kickoff = get_timestamp()
    self.meta.start_match(kickoff)
    return kickoff
def run(self):
    """Actual execution starts here.

    Walks the 'executors' list from self.executor_data and runs every
    executor block, honouring the per-block directive keys:

    * ``repeat``            -- run the whole block N times
    * ``sync``              -- request cross-plugin synchronisation
    * ``ha_interval`` / ``ha_start_delay`` / ``disruption_count``
                            -- timing knobs forwarded to each step
    * ``mode``              -- 'parallel' (threaded) or 'sequence'
    * ``timer``             -- reserved, currently ignored

    An empty block, an unsupported mode, or a failing step terminates
    the run via ha_infra.ha_exit(0).  PROMPT_COMMAND is stashed while
    the run owns the xterm titles and restored on completion.
    """
    # Exit if the executor is not defined.
    execute = self.executor_data.get('executors', None)
    if execute is None:
        LOG.critical('Nothing to run')
        ha_infra.ha_exit(0)
    self.executor_threads = []
    # clean up the xterm paths
    if os.path.exists(self.infra_path):
        shutil.rmtree(self.infra_path)
    ha_infra.start_run_time = \
        utils.get_timestamp(complete_timestamp=True)
    user_env_pc = None
    if os.environ.get('PROMPT_COMMAND', None):
        # save the PROMPT_COMMAND to set xterm title for now
        user_env_pc = os.environ['PROMPT_COMMAND']
        del os.environ['PROMPT_COMMAND']
    for executor_index, executor_block in enumerate(execute):
        # An empty block marks the end of the execution list.
        if not executor_block:
            ha_infra.stop_run_time = \
                utils.get_timestamp(complete_timestamp=True)
            LOG.info("******** Completing the execution ******** ")
            ha_infra.ha_exit(0)
        parallel = False
        repeat_count = 1
        LOG.info('Executing %s' % str(executor_index + 1))
        # process the repeat command
        if 'repeat' in executor_block:
            repeat_count = executor_block.get('repeat', 1)
            executor_block.pop('repeat')
        use_sync = False
        if 'sync' in executor_block:
            LOG.info("Sync is requested within the block")
            use_sync = executor_block.get('sync', False)
            LOG.info("Use Sync %s", use_sync)
        ha_interval = None
        ha_start_delay = None
        if 'ha_interval' in executor_block:
            ha_interval = executor_block.get('ha_interval', None)
        if 'ha_start_delay' in executor_block:
            ha_start_delay = executor_block.get('ha_start_delay', None)
        disruption_count = 1
        if 'disruption_count' in executor_block:
            disruption_count = executor_block.get('disruption_count', None)
        LOG.info("Block will be repeated %s times", repeat_count)
        # Repeat count in each steps
        for i in range(repeat_count):
            LOG.info("******** Block Execution Count %s ******** ",
                     str(i + 1))
            # process the mode command
            if 'mode' in executor_block:
                # if mode is parallel set parallel flag
                if executor_block['mode'].lower() == 'parallel':
                    LOG.info('starting thread')
                    parallel = True
                elif executor_block['mode'].lower() == 'sequence':
                    LOG.info('sequential execution')
                else:
                    LOG.critical('Unsupported mode , '
                                 'must be either '
                                 '"parallel" or "sequence"')
                    ha_infra.ha_exit(0)
                executor_block.pop('mode')
            # process the timer command
            if 'timer' in executor_block:
                # TODO: pradeech
                LOG.info('Timer....')
                executor_block.pop('timer')
            # Pre-bind so the except clause can log it even when the
            # failure happens before the step loop assigns it.
            step_action = None
            try:
                # Execute the command and the respective parameters
                del self.executor_threads[:]
                for step_action, nodes in executor_block.iteritems():
                    launched_process = 0
                    ha_infra.set_launched_process_count(launched_process)
                    self.execute_the_block(executor_index, nodes,
                                           step_action, ha_interval,
                                           ha_start_delay,
                                           disruption_count,
                                           parallel=parallel,
                                           use_sync=use_sync)
                if self.executor_threads:
                    # start all the executor threads
                    [t.start() for t in self.executor_threads]
                    [t.join() for t in self.executor_threads]
                ha_infra.display_infra_report()
            except NotImplementedError as runerror:
                # BUG FIX: the original used
                # ('Unable to execute %s - %s' % runerror, step_action),
                # which raises TypeError ("not enough arguments for format
                # string") instead of logging.  Pass lazy args to the
                # logger so both values are formatted correctly.
                LOG.critical('Unable to execute %s - %s',
                             runerror, step_action)
                ha_infra.ha_exit(0)
            except Exception as runerror:
                LOG.critical('Unable to continue execution %s'
                             % str(runerror))
                ha_infra.ha_exit(0)
    LOG.info("******** Completing the executions ******** ")
    ha_infra.stop_run_time = \
        utils.get_timestamp(complete_timestamp=True)
    # clean up all the pipes (best effort -- a pipe may already be gone)
    for f in self.open_pipes:
        try:
            os.unlink(f)
        except OSError:
            # Only swallow filesystem errors, not arbitrary exceptions.
            pass
    # restore the env variables
    if user_env_pc:
        os.environ['PROMPT_COMMAND'] = user_env_pc
def test_get_timestamp(self): stamp = utils.get_timestamp() assert stamp.endswith('Z')