def create_explorer(driver):
    """Create the AWS explorer node and wait until it is running.

    Reads the node name, AMI and instance type from ``config.STORAGE``,
    renders the explorer cloud-init template and passes its content as
    EC2 user data.

    :param driver: libcloud compute driver used to create the node
    :return: the running libcloud ``Node``, or ``None`` when creation failed
    """
    try:
        # config
        name = config.STORAGE["AWS_EXPLORER_NAME"]
        ami = config.STORAGE["AWS_EXPLORER_IMAGE_ID"]
        instance_type = config.STORAGE["AWS_EXPLORER_INSTANCE_TYPE"]

        # cloud init: render the template and read it back as user data
        rendered_file = config.render_template(
            config.STORAGE["EXPLORER_CLOUD_INIT"])
        with open(rendered_file, 'r') as content_file:
            explorer_content = content_file.read()

        # create the node and block until it reports as running
        node = create_node(driver=driver,
                           name=name,
                           image=ami,
                           size=instance_type,
                           user_data=explorer_content)
        explorer = driver.wait_until_running(nodes=[node],
                                             wait_period=1,
                                             timeout=30)
        # wait_until_running returns [(node, ip_addresses), ...]
        return explorer[0][0]
    except BaseHTTPError as e:
        # log the actual error (was a bare info message that hid the cause)
        logging.error("- An error occurred creating the explorer: %s", e)
        return None
def debug_msg():
    """Render the record overview page built from the JSON log file."""
    log_path = parameter["dir_log"] + "/log.json"

    with open(log_path) as handle:
        records = json.loads(handle.read())

    def size_in_mb(path):
        # Fixed-point approximation of the size in MB:
        # bytes * 10485 >> 20, scaled down to four decimal places.
        # Missing files count as size 0.
        if not os.path.exists(path):
            return 0
        return (os.path.getsize(path) * 10485 >> 20) / 10000.0

    for record in records:
        record['size'] = "{} MB".format(size_in_mb(record['file']))
        # keep only the basename for display
        record['file'] = record['file'].split('/')[-1]

    return render_template('record.html',
                           records=records,
                           colnames=['file', 'start_time', 'finish_time', 'size'])
def create_test_prepper(driver):
    """Create the AWS dataset-preparation node and wait until it is running.

    Builds the node from ``config.STORAGE`` settings with two EBS volumes:
    the root disk (deleted on termination) and an extra data disk that is
    kept after termination.

    :param driver: libcloud compute driver used to create the node
    :return: the running libcloud ``Node``, or ``None`` when creation failed
    """
    try:
        # config (the *_DISK_DIZE spelling matches the existing config keys)
        name = config.STORAGE["AWS_TEST_PREPPER_NAME"]
        ami = config.STORAGE["AWS_TEST_PREPPER_IMAGE_ID"]
        instance_type = config.STORAGE["AWS_TEST_PREPPER_INSTANCE_TYPE"]
        disk_size = config.STORAGE["AWS_TEST_PREPPER_DISK_DIZE"]
        extra_disk_size = config.STORAGE["AWS_TEST_PREPPER_EXTRA_DISK_DIZE"]

        # cloud init: render the template and read it back as user data
        cloud_init_file = os.path.join(
            config.STORAGE["BASE_CLOUD_INIT_PATH"],
            config.STORAGE["TEST_PREPPER_CLOUD_INIT"])
        rendered_file = config.render_template(cloud_init_file)
        with open(rendered_file, 'r') as content_file:
            user_data = content_file.read()

        # root disk plus an extra volume that survives termination
        extra_disk_disks = [{
            "DeviceName": "/dev/sda1",
            "Ebs": {
                "Encrypted": "false",
                "DeleteOnTermination": "true",
                "VolumeSize": disk_size,
                "VolumeType": "gp2"
            }
        }, {
            "DeviceName": "/dev/sdb",
            "Ebs": {
                "Encrypted": "false",
                "DeleteOnTermination": "false",
                "VolumeSize": extra_disk_size,
                "VolumeType": "gp2"
            },
        }]

        # create the node and block until it reports as running
        node = create_node(driver=driver,
                           name=name,
                           image=ami,
                           size=instance_type,
                           user_data=user_data,
                           ex_blockdevicemappings=extra_disk_disks)
        running_node = driver.wait_until_running(nodes=[node],
                                                 wait_period=1,
                                                 timeout=30)
        # wait_until_running returns [(node, ip_addresses), ...]
        return running_node[0][0]
    except BaseHTTPError as e:
        # log the actual error (was a bare info message that hid the cause)
        logging.error("- An error occurred creating the test prepper: %s", e)
        return None
def GET(self, arg):
    """Render a single post page, or raise a 404 when the post is missing."""
    post = self.get_post(arg)
    form = comment_form()
    if not post:
        raise web.notfound
    # count this page view
    post.view_count += 1
    sidebar_widget = get_sidebar()
    sidebar_widget['relative_posts'] = sidebar.relative_posts(post)
    return render_template('single.html',
                           form=form,
                           admin=web.ctx.session.username,
                           post=post,
                           widget=sidebar_widget,
                           location='single')
def create_core_template(driver, name=None, atoms_snapshot=None, ami=None):
    """Create an EC2 launch template for core nodes.

    Starts from ``config.STORAGE["AWS_CORE_TEMPLATE_CONFIG"]`` and optionally
    overrides the template name, the atoms EBS snapshot and the AMI, then
    attaches the rendered core cloud-init as base64 user data.

    :param driver: libcloud compute driver whose raw connection is used
    :param name: optional launch template name override
    :param atoms_snapshot: optional EBS snapshot id for the atoms volume
    :param ami: optional image id override
    """
    params = config.STORAGE["AWS_CORE_TEMPLATE_CONFIG"].copy()
    # (was assigned twice in a row; once is enough)
    params["Action"] = "CreateLaunchTemplate"
    # override name
    if name is not None:
        # make it easy to identify in the console
        params["LaunchTemplateName"] = name
        params["LaunchTemplateData.TagSpecification.1.Tag.2.Value"] = name
    # override atoms snapshot_id
    if atoms_snapshot is not None:
        params['LaunchTemplateData.BlockDeviceMapping.2.Ebs.SnapshotId'] = \
            atoms_snapshot
    else:
        logging.info("No snapshot provided for core template %s",
                     params["LaunchTemplateName"])
    # override ami
    if ami is not None:
        params['LaunchTemplateData.ImageId'] = ami
    else:
        logging.info("No image AMI provided for core template %s",
                     params["LaunchTemplateName"])
    # cloud init
    cloud_init_file = os.path.join(config.STORAGE["BASE_CLOUD_INIT_PATH"],
                                   config.STORAGE["CORE_CLOUD_INIT"])
    rendered_file = config.render_template(cloud_init_file)
    with open(rendered_file, 'r') as content_file:
        content = content_file.read()
    # EC2 expects user data base64-encoded
    params["LaunchTemplateData.UserData"] = base64.urlsafe_b64encode(
        content.encode()).decode('utf-8')
    try:
        driver.connection.request("/", params=params)
        logging.info("- Core template created.")
    except BaseHTTPError as e:
        logging.info("- %s for %s.", e, params["LaunchTemplateName"])
def GET(self):
    """Render the paginated home page (post list plus sidebar)."""
    i = web.input(page=1)
    try:
        page = int(i.page)
    except (TypeError, ValueError):
        # a non-numeric ?page= parameter falls back to the first page
        page = 1
    context = {}
    post_count = web.ctx.orm.query(Post).\
        filter(Post.content_type == 'post').count()
    # ceiling division: '/' would yield a float on Python 3 and break
    # the page count (e.g. 2.5 pages)
    page_count = post_count // POST_PER_PAGE
    if post_count % POST_PER_PAGE != 0:
        page_count += 1
    context['widget'] = get_sidebar()
    context['page_count'] = page_count
    # NOTE(review): the query is not offset/limited here — presumably the
    # template slices by 'page'; confirm before changing.
    context['posts'] = web.ctx.orm.query(Post)
    context['page'] = page
    context['location'] = 'home'
    return render_template('index.html', **context)
def homepage():
    """Serve the landing page."""
    template_name = 'index.html'
    return render_template(template_name)
def page_not_found(error):
    """Render the custom 404 page together with the 404 status code."""
    body = render_template('page_not_found.html')
    return body, 404
def initialize_test():
    """Entry point that drives the GCE test network from ``sys.argv`` flags.

    Depending on the flags present it creates/destroys firewall rules, the
    dataset preparator, the explorer node, and the core/extra node groups;
    otherwise it brings the full test network up (explorer, boot node,
    core regions, optional extra regions).

    Communicates network parameters to other helpers via RADIX_MTPS_*
    environment variables.

    NOTE(review): the ``create_explorer``/``create_test_prepper``/
    ``create_core_template`` calls here pass a cloud-init file handle,
    unlike the AWS variants defined earlier in this file — presumably the
    GCE variants are imported from another module; confirm.
    """
    ComputeEngine = get_driver(Provider.GCE)
    gce = login_gcp(ComputeEngine)

    # firewall rules
    if '--destroy-firewall-rules' in sys.argv:
        logging.info("Destroying firewall rules...")
        destroy_ingress_rules(gce)
    else:
        # default: ensure ingress rules exist
        logging.info("Checking if firewall rules are created...")
        create_ingress_rules(gce)

    # create test prepper
    if '--create-test-prepper' in sys.argv:
        if not test_prepper_exists(gce):
            logging.info("Creating dataset preparator...")
            rendered_file = config.render_template(
                config.STORAGE["TEST_PREPPER_CLOUD_INIT"])
            # handed to the helper as the cloud-init payload
            test_prepper_rendered_file = open(rendered_file, 'r')
            create_test_prepper(gce, test_prepper_rendered_file)
    # destroy test prepper
    elif '--destroy-test-prepper' in sys.argv:
        if test_prepper_exists(gce):
            logging.info("Destroying dataset preparator...")
            destroy_node(gce,
                         config.STORAGE["TEST_PREPPER_MACHINE_INSTANCE_NAME"])

    # destroy explorer
    if '--destroy-explorer' in sys.argv:
        if explorer_exists(gce):
            logging.info("Destroying explorer node...")
            destroy_node(gce, config.STORAGE["EXPLORER_MACHINE_INSTANCE_NAME"])
    # create explorer
    else:
        if not explorer_exists(gce):
            logging.info("Creating explorer...")
            if not os.environ.get("RADIX_MTPS_NETWORK_PASSWORD"):
                # generate random password
                os.environ[
                    "RADIX_MTPS_NETWORK_PASSWORD"] = config.generate_password(
                    )
                logging.info(
                    "NOTE: generated the admin/metrics password for you: %s",
                    os.environ["RADIX_MTPS_NETWORK_PASSWORD"])
            if not os.environ.get("RADIX_MTPS_NGINX_ACCESS"):
                os.environ["RADIX_MTPS_NGINX_ACCESS"] = "SUCCESS"
            # render file
            rendered_file = config.render_template(
                config.STORAGE["EXPLORER_CLOUD_INIT"])
            explorer_rendered_file = open(rendered_file, 'r')
            explorer = create_explorer(gce, explorer_rendered_file)
            logging.info("- Explorer: https://%s", explorer.public_ips[0])
        else:
            logging.info("An explorer node seems to be up and running.")

    # destroy cores
    if '--destroy-cores' in sys.argv:
        # destroy core nodes
        logging.info("Destroying core nodes...")
        destroy_all_core_groups(gce)
        # poll until every core instance group is gone
        while count_core_groups(gce) > 0:
            logging.info(
                "Waiting for core nodes to come down, sleeping 10 seconds")
            time.sleep(10)
        # delete templates
        logging.debug("Destroying templates...")
        destroy_core_template(
            gce, config.STORAGE["CORE_MACHINE_INSTANCE_TEMPLATE_NAME"])
        destroy_core_template(
            gce, config.STORAGE["CORE_MACHINE_BOOT_INSTANCE_TEMPLATE_NAME"])
        destroy_core_template(gce,
                              config.STORAGE["EXTRA_INSTANCE_TEMPLATE_NAME"])
    # create cores
    else:
        os.environ["CORE_DOCKER_IMAGE"] = config.STORAGE["CORE_DOCKER_IMAGE"]
        explorer = get_explorer(gce)
        attempts = 0
        # wait for the explorer to come up if necessary
        # (3 tries, 15s apart; setting attempts = 3 exits the loop on success)
        while attempts < 3:
            try:
                os.environ[
                    "RADIX_MTPS_NETWORK_EXPLORER_IP"] = explorer.public_ips[0]
                os.environ["RADIX_MTPS_NETWORK_ATOMS_FILE"] = os.environ.get(
                    "RADIX_MTPS_NETWORK_ATOMS_FILE",
                    config.STORAGE["DEFAULT_NETWORK_ATOMS_FILE"])
                os.environ[
                    "RADIX_MTPS_NETWORK_PASSWORD"] = ssh.get_admin_password(
                        explorer.public_ips[0])
                attempts = 3
            except Exception:
                attempts += 1
                time.sleep(15)
        # start pumping URL
        if not os.environ.get("RADIX_MTPS_NETWORK_START_PUMP_URL"):
            os.environ["RADIX_MTPS_NETWORK_START_PUMP_URL"] = config.STORAGE[
                "DEFAULT_NETWORK_START_URL"]
        boot_node = get_boot_node(gce)
        if boot_node:
            logging.info("A boot node seems to be up and running.")
            # extract the universe from host
            os.environ["RADIX_MTPS_NETWORK_UNIVERSE"] = ssh.get_test_universe(
                boot_node.public_ips[0])
        else:
            # reconfigure shard allocator
            # NOTE(review): shard_count is assigned here but the value is
            # recomputed inline in the update_shard_count call below.
            shard_count = os.environ.get(
                "RADIX_MTPS_SHARD_COUNT",
                config.STORAGE["DEFAULT_NETWORK_SHARD_COUNT"])
            ssh.update_shard_count(
                explorer.public_ips[0],
                os.environ.get("RADIX_MTPS_SHARD_COUNT",
                               config.STORAGE["DEFAULT_NETWORK_SHARD_COUNT"]),
                os.environ.get(
                    "RADIX_MTPS_SHARD_OVERLAP",
                    config.STORAGE["DEFAULT_NETWORK_SHARD_OVERLAP"]))
            # generate a new universe
            if "RADIX_MTPS_NETWORK_UNIVERSE" not in os.environ:
                logging.info("Generating new universe...")
                attempts = 0
                # wait for docker to be available
                # (same retry scheme as above; re-raises on the final attempt)
                while attempts < 3:
                    try:
                        universe = ssh.generate_universe(
                            explorer.public_ips[0])
                        attempts = 3
                    except Exception as e:
                        if attempts == 2:
                            raise Exception(e)
                        attempts += 1
                        time.sleep(15)
                os.environ["RADIX_MTPS_NETWORK_UNIVERSE"] = universe
            # boot node
            logging.info("Creating boot node...")
            rendered_file = config.render_template(
                config.STORAGE["CORE_CLOUD_INIT"])
            region = gce.ex_get_region(
                config.STORAGE["CORE_MACHINE_BOOT_NODE_LOCATION"])
            create_core_template(
                gce,
                config.STORAGE["CORE_MACHINE_BOOT_INSTANCE_TEMPLATE_NAME"],
                open(rendered_file, "r"))
            create_core_group(
                gce,
                region,
                1,
                config.STORAGE["CORE_MACHINE_BOOT_INSTANCE_TEMPLATE_NAME"],
                prefix="core-boot")
            boot_node = wait_boot_node(gce)
            wait_for_public_ip(gce, boot_node)
            # TODO: update node-finder with boot node info
            logging.info("Updating node finder...")
            ssh.update_node_finder(explorer.public_ips[0],
                                   boot_node.public_ips[0])
        logging.debug("Test universe: %s",
                      os.environ["RADIX_MTPS_NETWORK_UNIVERSE"])
        logging.info("Test will run at: (go to %s)",
                     os.environ["RADIX_MTPS_NETWORK_START_PUMP_URL"])
        os.environ["RADIX_MTPS_NETWORK_SEEDS"] = boot_node.public_ips[0]
        # create core node machine template
        rendered_file = config.render_template(
            config.STORAGE["CORE_CLOUD_INIT"])
        create_core_template(
            gce, config.STORAGE["CORE_MACHINE_INSTANCE_TEMPLATE_NAME"],
            open(rendered_file, "r"))
        # bring each configured region up to its requested core-node count
        for region, size in config.STORAGE["CORE_REGIONS"].items():
            region = gce.ex_get_region(region)
            count = count_core_nodes(gce, region)
            if count == 0:
                logging.info("Creating %d Core node in %s...", size,
                             region.name)
                create_core_group(
                    gce, region, size,
                    config.STORAGE["CORE_MACHINE_INSTANCE_TEMPLATE_NAME"])
            elif count < size:
                logging.warning(
                    "Not enough Core nodes in %s than requested: %d < %d",
                    region.name, count, size)
            else:
                logging.info("Core nodes in %s are already up and running",
                             region.name)
        # create extra nodes
        if "EXTRA_REGIONS" in config.STORAGE:
            rendered_file = config.render_template(
                config.STORAGE["CORE_CLOUD_EXTRA"])
            create_core_template(
                gce,
                name=config.STORAGE["EXTRA_INSTANCE_TEMPLATE_NAME"],
                cloud_init=open(rendered_file, "r"))
            for region, size in config.STORAGE["EXTRA_REGIONS"].items():
                region = gce.ex_get_region(region)
                count = count_core_nodes(gce, region, "extra")
                if count == 0:
                    logging.info("Creating %d extra node in %s...", size,
                                 region.name)
                    create_core_group(
                        gce,
                        region,
                        size,
                        config.STORAGE["EXTRA_INSTANCE_TEMPLATE_NAME"],
                        prefix="extra")
                elif count < size:
                    logging.warning(
                        "Not enough Malicious nodes in %s than requested: %d < %d",
                        region.name, count, size)
                else:
                    logging.info(
                        "Malicious nodes in %s are already up and running",
                        region.name)