def fix_server_in_unit(build_id, server_name, type, parameters):
    """
    Fixes a server when something goes wrong in the unit.

    :param build_id: The unit_id of the unit whose workouts to fix
    :param server_name: The name of the server in each workout to act on
    :param type: The type of fix. The following types are supported
        - ssh-key - Include the new public sshkey as a parameter
        - image-correction - Include the new image name for the server as a parameter
        NOTE(review): `type` shadows the builtin of the same name; the
        parameter name is kept for caller compatibility.
    :param parameters: Parameter of the fix based on the type
    """
    query_workouts = ds_client.query(kind='cybergym-workout')
    query_workouts.add_filter('unit_id', '=', build_id)
    for workout in list(query_workouts.fetch()):
        for server in workout['servers']:
            if server['name'] == server_name:
                if type == "ssh-key":
                    print(f"Begin setting ssh key for {workout.key.name}")
                    server['sshkey'] = parameters
                    print(f"Completed setting ssh key for {workout.key.name}")
                elif type == "image-correction":
                    old_server_image = server['image']
                    print(f"Begin changing the image name in {workout.key.name} from {old_server_image} "
                          f"to {parameters}")
                    server['image'] = parameters
                    print(f"Completed image correction for {workout.key.name}")
        # Persist the (possibly modified) workout entity.
        ds_client.put(workout)
def start_arena(unit_id):
    """
    Start an arena: mark the unit as running, publish a START action for each
    of the arena's central servers, and start each student workout in the unit.

    :param unit_id: The build ID of the arena unit to start
    """
    g_logger = log_client.logger('arena-actions')
    g_logger.log_struct({"message": "Starting arena {}".format(unit_id)},
                        severity=LOG_LEVELS.INFO)
    unit = ds_client.get(ds_client.key('cybergym-unit', unit_id))
    state_transition(entity=unit, new_state=BUILD_STATES.STARTING)
    unit['arena']['running'] = True
    unit['arena']['gm_start_time'] = str(calendar.timegm(time.gmtime()))
    ds_client.put(unit)

    # Start the central servers
    g_logger.log_struct(
        {"message": "Starting central servers for arena {}".format(unit_id)},
        severity=LOG_LEVELS.INFO)
    query_central_arena_servers = ds_client.query(kind='cybergym-server')
    query_central_arena_servers.add_filter("workout", "=", unit_id)
    # The publisher client and topic path are loop-invariant; create them once
    # instead of per server.
    pubsub_topic = PUBSUB_TOPICS.MANAGE_SERVER
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project, pubsub_topic)
    for server in list(query_central_arena_servers.fetch()):
        # Publish to a server management topic
        future = publisher.publish(topic_path, data=b'Server Build',
                                   server_name=server['name'],
                                   action=SERVER_ACTIONS.START)
        print(future.result())

    # Now start all of the student workouts for this arena
    for workout_id in unit['workouts']:
        start_vm(workout_id)
def restore_server(server_name):
    """
    1) Delete the server, 2) Set the image name to be one previously
    snapshotted, 3) Build the server, and then 4) restore the build
    configuration to the existing image in case it's later nuked.

    @param server_name: Name of the server.
    @type server_name: String
    @return: None
    @raise LookupError: If no snapshot exists for the server.
    """
    server_delete(server_name)
    server = ds_client.get(ds_client.key('cybergym-server', server_name))
    snapshots = compute.snapshots().list(
        project=project, filter=f"name = {server.key.name}*").execute()
    # Guard against a missing/empty snapshot list instead of raising an
    # opaque KeyError/IndexError.
    items = snapshots.get('items')
    if not items:
        raise LookupError(f"No snapshot found for server {server_name}")
    snapshot_name = items[0]['name']
    # NOTE(review): the snapshot project is hard-coded here instead of using
    # the global `project`; confirm snapshots always live in ualr-cybersecurity.
    sourceSnapshot = f"projects/ualr-cybersecurity/global/snapshots/{snapshot_name}"
    disks = [{
        'boot': True,
        'autoDelete': True,
        'initializeParams': {
            'sourceSnapshot': sourceSnapshot,
        }
    }]
    # Temporarily swap in the snapshot-backed disk, build the server, then
    # restore the original disk config so a later nuke rebuilds from the
    # original image.
    old_config_disks = server['config']['disks']
    server['config']['disks'] = disks
    ds_client.put(server)
    server_build(server_name)
    server['config']['disks'] = old_config_disks
    ds_client.put(server)
def delete_all_active_units(keep_data=False):
    """
    Delete ALL active units. Intended only for special maintenance windows
    when no other activity is occurring.

    :param keep_data: When True, expire the active workouts immediately;
        otherwise mark them as misfits for deletion.
    """
    active_workouts = ds_client.query(kind='cybergym-workout')
    active_workouts.add_filter('active', '=', True)
    for workout in list(active_workouts.fetch()):
        # Only touch workouts built in this project.
        build_project = workout.get('build_project_location', project)
        if build_project != project:
            continue
        # Skip workouts with no recorded state or already deleted ones.
        if 'state' not in workout or workout['state'] == BUILD_STATES.DELETED:
            continue
        if keep_data:
            workout['expiration'] = 0
        else:
            workout['misfit'] = True
        ds_client.put(workout)
    print(
        "All active workouts have been processed. Starting to process the delete workouts function"
    )
    deletion_type = (DeletionManager.DeletionType.EXPIRED
                     if keep_data else DeletionManager.DeletionType.MISFIT)
    DeletionManager(deletion_type=deletion_type).run()
    print("Sent commands to delete workouts and arenas")
def stop_arena(unit_id):
    """
    Arenas have server builds for the unit as well as individual workouts.
    This function stops all of these servers.

    NOTE: another definition of stop_arena exists in this module; the one
    defined last in the file wins at import time.

    :param unit_id: The build ID of the arena
    :return: None
    """
    # First stop the unit's servers
    result = compute.instances().list(
        project=project, zone=zone,
        filter='name = {}*'.format(unit_id)).execute()
    unit = ds_client.get(ds_client.key('cybergym-unit', unit_id))
    unit['arena']['running'] = False
    ds_client.put(unit)
    g_logger = log_client.logger('arena-actions')
    if 'items' in result:
        for vm_instance in result['items']:
            response = compute.instances().stop(
                project=project, zone=zone,
                instance=vm_instance["name"]).execute()
        # BUG FIX: severity was passed the LOG_LEVELS class itself instead
        # of LOG_LEVELS.INFO.
        g_logger.log_struct(
            {"message": "Stopped servers for arena {}".format(unit_id)},
            severity=LOG_LEVELS.INFO)
    else:
        g_logger.log_struct(
            {"message": "No servers in arena {} to stop".format(unit_id)},
            severity=LOG_LEVELS.WARNING)

    # Now stop every student workout that belongs to the arena.
    for workout_id in unit['workouts']:
        g_logger = log_client.logger(str(workout_id))
        result = compute.instances().list(
            project=project, zone=zone,
            filter='name = {}*'.format(workout_id)).execute()
        workout = ds_client.get(ds_client.key('cybergym-workout', workout_id))
        workout['running'] = False
        ds_client.put(workout)
        if 'items' in result:
            for vm_instance in result['items']:
                response = compute.instances().stop(
                    project=project, zone=zone,
                    instance=vm_instance["name"]).execute()
            g_logger.log_struct(
                {
                    "message": "Workout servers stopped for workout {}".format(workout_id)
                },
                severity=LOG_LEVELS.INFO)
        else:
            g_logger.log_struct(
                {
                    "message": "No servers to stop for workout {}".format(workout_id)
                },
                severity=LOG_LEVELS.WARNING)
def extend_timeout_unit(unit_id, hours):
    """
    Extend the time before the workouts in a unit automatically expire.

    BUG FIX: the body previously referenced an undefined name `days` (the
    parameter is `hours`), raising a NameError on every call.

    :param unit_id: The unit_id whose workouts to extend
    :param hours: Amount added to each workout's expiration value.
        NOTE(review): the unit of `expiration` (seconds vs hours) is not
        visible from this function — confirm against the expiration checker.
    """
    query_workouts = ds_client.query(kind='cybergym-workout')
    query_workouts.add_filter('unit_id', '=', unit_id)
    for workout in list(query_workouts.fetch()):
        current_expiration = int(workout['expiration'])
        workout['expiration'] = f"{current_expiration + hours}"
        ds_client.put(workout)
def update_student_instructions_for_unit(unit_id, instructions_file):
    """
    Point every workout in a unit at a new student instructions file.

    @param unit_id: The unit to update
    @type unit_id: str
    @param instructions_file: File to use in the Cloud storage under the
        globally defined student_instructions_url
    @type instructions_file: str
    """
    # The new URL is the same for every workout; compute it once.
    new_url = f"{student_instructions_url}{instructions_file}"
    workouts_query = ds_client.query(kind='cybergym-workout')
    workouts_query.add_filter('unit_id', '=', unit_id)
    for workout in workouts_query.fetch():
        workout['student_instructions_url'] = new_url
        ds_client.put(workout)
def add_child_project(child_project):
    """
    Register a newly provisioned child project with this parent project.
    Called by the main application when a new child project is created.

    @param child_project: Name of the child project
    @type child_project: String
    @return: True on success
    """
    admin_info = ds_client.get(ds_client.key(AdminInfoEntity.KIND, 'cybergym'))
    children_key = AdminInfoEntity.Entities.CHILD_PROJECTS
    # Initialize the child-project list on first use, then append.
    if children_key not in admin_info:
        admin_info[children_key] = []
    admin_info[children_key].append(child_project)
    ds_client.put(admin_info)
    return True
def start_vm(workout_id):
    """
    Start a workout: record the start time and publish a START action for
    each of the workout's servers.

    :param workout_id: The build ID of the workout to start
    """
    print("Starting workout %s" % workout_id)
    workout = ds_client.get(ds_client.key('cybergym-workout', workout_id))
    state_transition(entity=workout, new_state=BUILD_STATES.STARTING)
    workout['start_time'] = str(calendar.timegm(time.gmtime()))
    ds_client.put(workout)
    query_workout_servers = ds_client.query(kind='cybergym-server')
    query_workout_servers.add_filter("workout", "=", workout_id)
    # The publisher client and topic path are loop-invariant; create them once
    # instead of per server.
    pubsub_topic = PUBSUB_TOPICS.MANAGE_SERVER
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project, pubsub_topic)
    for server in list(query_workout_servers.fetch()):
        # Publish to a server management topic
        future = publisher.publish(topic_path, data=b'Server Build',
                                   server_name=server['name'],
                                   action=SERVER_ACTIONS.START)
        print(future.result())
def stop_workout(workout_id):
    """
    Stop all compute instances for a workout and transition it from RUNNING
    back to READY.

    NOTE: another definition of stop_workout exists in this module; the one
    defined last in the file wins at import time.

    :param workout_id: The build ID of the workout to stop
    """
    instances = compute.instances().list(
        project=project, zone=zone,
        filter='name = {}*'.format(workout_id)).execute()
    workout = ds_client.get(ds_client.key('cybergym-workout', workout_id))
    state_transition(entity=workout, new_state=BUILD_STATES.READY,
                     existing_state=BUILD_STATES.RUNNING)
    ds_client.put(workout)
    # Guard clause: nothing to stop.
    if 'items' not in instances:
        print("No workouts to stop")
        return
    for vm_instance in instances['items']:
        compute.instances().stop(
            project=project, zone=zone,
            instance=vm_instance["name"]).execute()
    print("Workouts stopped")
def stop_workout(workout_id):
    """
    Stop a workout: accumulate its runtime counter, transition it to READY,
    publish STOP actions for its servers, and stop its compute instances.

    :param workout_id: The build ID of the workout to stop
    """
    result = compute.instances().list(
        project=project, zone=zone,
        filter='name = {}*'.format(workout_id)).execute()
    workout = ds_client.get(ds_client.key('cybergym-workout', workout_id))
    state_transition(entity=workout, new_state=BUILD_STATES.READY,
                     existing_state=BUILD_STATES.RUNNING)
    # Accumulate total runtime for this workout.
    # BUG FIX: previously start_time stayed None when the record had no
    # 'start_time', and int(None) raised a TypeError.
    if 'start_time' in workout and workout['start_time'] is not None:
        stop_time = calendar.timegm(time.gmtime())
        runtime = int(stop_time) - int(workout['start_time'])
        if 'runtime_counter' in workout:
            accumulator = workout['runtime_counter']
            workout['runtime_counter'] = int(accumulator) + runtime
        else:
            workout['runtime_counter'] = runtime
    ds_client.put(workout)
    query_workout_servers = ds_client.query(kind='cybergym-server')
    query_workout_servers.add_filter("workout", "=", workout_id)
    # The publisher client and topic path are loop-invariant; create them once.
    pubsub_topic = PUBSUB_TOPICS.MANAGE_SERVER
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project, pubsub_topic)
    for server in list(query_workout_servers.fetch()):
        # Publish to a server management topic
        future = publisher.publish(topic_path, data=b'Server Build',
                                   server_name=server['name'],
                                   action=SERVER_ACTIONS.STOP)
        print(future.result())
    g_logger = log_client.logger(str(workout_id))
    if 'items' in result:
        for vm_instance in result['items']:
            response = compute.instances().stop(
                project=project, zone=zone,
                instance=vm_instance["name"]).execute()
        g_logger.log_struct({"message": "Workout stopped"},
                            severity=LOG_LEVELS.INFO)
    else:
        g_logger.log_struct({"message": "No workouts to stop"},
                            severity=LOG_LEVELS.WARNING)
def check_ordered_arenas_state(unit, ordered_state):
    """
    Workouts are built in a designated order. This function checks the state
    to determine if the unit is in a valid state to begin performing a function.

    :param unit: A datastore cybergym-unit entity
    :param ordered_state: An ordered state to verify it has not already been
        performed for a given unit. For example, we would not want to attempt
        building a network again if it's already built.
    :return: True when the unit's recorded state is at or before
        ordered_state in the ordered_arena_states ordering; False otherwise
        (including when the unit has no state or an unknown state).
    """
    # A unit with no recorded state gets one initialized to None (persisted)
    # and is not considered ready for the ordered operation.
    if 'state' not in unit:
        unit['state'] = None
        ds_client.put(unit)
        return False
    # States outside the known ordering cannot be compared.
    if unit['state'] not in ordered_arena_states:
        return False
    # NOTE(review): '<=' returns True when the current state has NOT yet
    # advanced past ordered_state — confirm callers expect this direction
    # (the docstring's "has not already been performed" wording suggests so).
    if ordered_arena_states[unit['state']] <= ordered_arena_states[ordered_state]:
        return True
    else:
        return False
def stop_arena(unit_id):
    """
    Arenas have server builds for the unit as well as individual workouts;
    this function stops all of those servers.

    NOTE: another definition of stop_arena exists in this module; the one
    defined last in the file wins at import time.

    :param unit_id: The build ID of the arena
    :return: None
    """
    # Stop the unit-level (central) servers first.
    unit_instances = compute.instances().list(
        project=project, zone=zone,
        filter='name = {}*'.format(unit_id)).execute()
    unit = ds_client.get(ds_client.key('cybergym-unit', unit_id))
    unit['arena']['running'] = False
    ds_client.put(unit)
    if 'items' in unit_instances:
        for vm_instance in unit_instances['items']:
            compute.instances().stop(
                project=project, zone=zone,
                instance=vm_instance["name"]).execute()
        print("Unit servers stopped")
    else:
        print("No unit servers to stop")
    # Then stop each student workout's servers.
    for workout_id in unit['workouts']:
        workout_instances = compute.instances().list(
            project=project, zone=zone,
            filter='name = {}*'.format(workout_id)).execute()
        workout = ds_client.get(ds_client.key('cybergym-workout', workout_id))
        workout['running'] = False
        ds_client.put(workout)
        if 'items' in workout_instances:
            for vm_instance in workout_instances['items']:
                compute.instances().stop(
                    project=project, zone=zone,
                    instance=vm_instance["name"]).execute()
            print("Workout servers stopped for %s" % workout_id)
        else:
            print("No workout servers to stop for %s" % workout_id)
def state_transition(entity, new_state, existing_state=None):
    """
    Consistently change a datastore entity's state with the necessary checks.

    :param entity: A datastore entity
    :param new_state: The new state for the server
    :param existing_state: Optional check for the existing state of the
        server. Defaults to None (no check).
    :return: True on success; False when existing_state was supplied and the
        entity was not actually in that state.
    """
    if 'state' not in entity:
        entity['state'] = None
    current_state = entity['state']
    # Refuse the transition when the caller's expectation does not match.
    if existing_state and current_state != existing_state:
        print(
            f"Error in state transition: Expected entity to be in {existing_state} state. Instead, it was in the state"
            f" {current_state}")
        return False
    entity['state'] = new_state
    entity['state-timestamp'] = str(calendar.timegm(time.gmtime()))
    ds_client.put(entity)
    return True
def create_new_workout_in_unit(unit_id, student_name, email_address=None,
                               registration_required=False):
    """
    Add a new workout for a newly registered user to a preexisting unit.
    The unit's first workout is cloned as a template for the new workout.

    @param unit_id: The unit_id to add the workout to
    @type unit_id: String
    @param student_name: Name of student to add
    @type student_name: String
    @param email_address: Email address of the student to add
    @type email_address: String
    @param registration_required: Whether the new workout requires registration
    @return: None
    @rtype: None
    """
    unit = ds_client.get(ds_client.key('cybergym-unit', unit_id))
    # Clone the first workout in the unit and re-key it with a random ID.
    workout_template_id = unit['workouts'][0]
    new_workout = ds_client.get(
        ds_client.key('cybergym-workout', workout_template_id))
    new_id = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
    new_workout.key = ds_client.key('cybergym-workout', new_id)
    new_workout['state'] = BUILD_STATES.START
    new_workout['student_email'] = email_address
    # NOTE(review): this assumes the template's 'student_name' field is a
    # dict holding the name and email — confirm against the workout schema.
    new_workout['student_name']['student_name'] = student_name
    new_workout['student_name']['student_email'] = email_address
    new_workout['registration_required'] = registration_required
    unit['workouts'].append(new_workout.key.name)
    ds_client.put(unit)
    ds_client.put(new_workout)
    if registration_required:
        print(f"New registered workout created for {email_address}")
    else:
        print(f"New workout ID is {new_id}")
def delete_unit(unit_id, delete_key=False, delete_immediately=False):
    """
    Deletes a full unit when it was created on accident. Marks every workout
    in the unit as a misfit and, when the budget allows, runs the misfit
    DeletionManager.

    :param unit_id: The unit_id to delete
    :param delete_key: Boolean on whether to delete the Datastore entity.
        NOTE(review): currently unused by the body — confirm intent.
    :param delete_immediately: Whether to delete immediately or create misfits
        and let the cloud function delete this.
        NOTE(review): currently unused by the body — confirm intent.
    """
    bm = BudgetManager()
    query_workouts = ds_client.query(kind='cybergym-workout')
    query_workouts.add_filter('unit_id', '=', unit_id)
    # Flag every workout in the unit for deletion.
    for workout in list(query_workouts.fetch()):
        workout['misfit'] = True
        ds_client.put(workout)
    print(
        "All workouts marked as misfits. Starting to process the delete workouts function"
    )
    # Only run the deletion when the project budget has not been exceeded.
    if bm.check_budget():
        DeletionManager(
            deletion_type=DeletionManager.DeletionType.MISFIT).run()
        print("Completed deleting workouts")
    else:
        print(
            "Cannot delete misfits. Budget exceeded variable is set for this project."
        )
def update_registered_email(class_name, curr_email, new_email):
    """
    Update a student's email everywhere it is registered: the class roster,
    all of their assigned workouts, and the authorized-students list.
    Intended for cases where a student was registered in a class under an
    incorrect email address.

    :param class_name: Name of the class we want to update student email in
    :param curr_email: Email we want to update
    :param new_email: Email we want to update with
    """
    # Update the roster of the target class.
    class_query = ds_client.query(kind='cybergym-class')
    class_query.add_filter('class_name', '=', class_name)
    for cybergym_class in list(class_query.fetch()):
        for student in cybergym_class['roster']:
            if student['student_email'] == curr_email:
                print(f'[+] Update class roster with {new_email}')
                student['student_email'] = new_email
                ds_client.put(cybergym_class)
                break
    # Update every workout assigned to curr_email.
    workout_query = ds_client.query(kind='cybergym-workout')
    workout_query.add_filter('student_email', '=', curr_email)
    for workout in list(workout_query.fetch()):
        workout['student_email'] = new_email
        print(f"[*] Updating workout's student_email with {new_email}")
        ds_client.put(workout)
    # Finally, swap the email inside the authorized students list.
    admin_query = ds_client.query(kind='cybergym-admin-info')
    for admin_info in list(admin_query.fetch()):
        for pos, student in enumerate(admin_info['students']):
            if student == curr_email:
                admin_info['students'][pos] = new_email
                print(
                    f'[+] Replaced {curr_email} with {admin_info["students"][pos]} in authed users list'
                )
                ds_client.put(admin_info)
                break
    print('[+] Update complete!')
def create_instance_custom_image(compute, workout, name, custom_image,
                                 machine_type, networkRouting, networks, tags,
                                 meta_data, sshkey=None, student_entry=False,
                                 minCpuPlatform=None):
    """
    Core function to create a new server according to the input specification.
    This gets called through a cloud function during the automatic build.

    :param compute: A compute object to build the server
    :param workout: The ID of the build
    :param name: Name of the server
    :param custom_image: Cloud image to use for the build
    :param machine_type: The cloud machine type
    :param networkRouting: True or False whether this is a firewall which routes traffic
    :param networks: The NIC specification for this server
    :param tags: Tags are key and value pairs which sometimes define the firewall rules
    :param meta_data: This includes startup scripts
    :param sshkey: If the server is running an SSH service, then this adds the public ssh key used for connections
    :param student_entry: If this is a student_entry image, then add that to the configuration.
    :param minCpuPlatform: Optional minimum CPU platform for the instance.
    :return: None
    """
    # First check to see if the server configuration already exists. If so, then return without error
    exists_check = ds_client.query(kind='cybergym-server')
    exists_check.add_filter("name", "=", name)
    # NOTE(review): num_results on a datastore iterator is only populated as
    # the iterator is consumed — confirm this duplicate check actually fires.
    if exists_check.fetch().num_results > 0:
        print(f'Server {name} already exists. Skipping configuration')
        return
    image_response = compute.images().get(project=project,
                                          image=custom_image).execute()
    source_disk_image = image_response['selfLink']

    # Configure the machine
    machine = "zones/%s/machineTypes/%s" % (zone, machine_type)
    networkInterfaces = []
    for network in networks:
        if network["external_NAT"]:
            accessConfigs = {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
        else:
            accessConfigs = None
        add_network_interface = {
            'network': 'projects/%s/global/networks/%s' % (project, network["network"]),
            'subnetwork': 'regions/us-central1/subnetworks/' + network["subnet"],
            'accessConfigs': [accessConfigs]
        }
        if 'internal_IP' in network:
            add_network_interface['networkIP'] = network["internal_IP"]
        if 'aliasIpRanges' in network:
            add_network_interface['aliasIpRanges'] = network['aliasIpRanges']
        networkInterfaces.append(add_network_interface)
    config = {
        'name': name,
        'machineType': machine,
        # allow http and https server with tags
        'tags': tags,
        # Specify the boot disk and the image to use as a source.
        'disks': [{
            'boot': True,
            'autoDelete': True,
            'initializeParams': {
                'sourceImage': source_disk_image,
            }
        }],
        'networkInterfaces': networkInterfaces,
        # Allow the instance to access cloud storage and logging.
        'serviceAccounts': [{
            'email': 'default',
            'scopes': [
                'https://www.googleapis.com/auth/devstorage.read_write',
                'https://www.googleapis.com/auth/logging.write'
            ]
        }],
    }
    if meta_data:
        config['metadata'] = {'items': meta_data}
    if sshkey:
        # BUG FIX: previously this (a) raised TypeError ('items' in None)
        # when meta_data was None, (b) stored metadata 'items' as a bare
        # dict instead of the list the GCE API requires, and (c) clobbered
        # existing metadata when meta_data had no 'items' key.
        ssh_item = {"key": "ssh-keys", "value": sshkey}
        if meta_data:
            existing_items = config['metadata']['items']
            if isinstance(existing_items, list):
                existing_items.append(ssh_item)
            else:
                # meta_data was a single {key, value} dict; normalize to a list.
                config['metadata']['items'] = [existing_items, ssh_item]
        else:
            config['metadata'] = {'items': [ssh_item]}

    # For a network routing firewall (i.e. Fortinet) add an additional disk for logging.
    if networkRouting:
        config["canIpForward"] = True
        # Commented out because only Fortinet uses this. Need to create a custom build template instead.
        # new_disk = {"mode": "READ_WRITE", "boot": False, "autoDelete": True,
        #             "source": "projects/" + project + "/zones/" + zone + "/disks/" + name + "-disk"}
        # config['disks'].append(new_disk)
    if minCpuPlatform:
        config['minCpuPlatform'] = minCpuPlatform
    new_server = datastore.Entity(ds_client.key('cybergym-server', f'{name}'))
    new_server.update({
        'name': name,
        'workout': workout,
        'config': config,
        'state': SERVER_STATES.READY,
        'state-timestamp': str(calendar.timegm(time.gmtime())),
        'student_entry': student_entry
    })
    ds_client.put(new_server)

    # Publish to a server build topic
    pubsub_topic = PUBSUB_TOPICS.MANAGE_SERVER
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project, pubsub_topic)
    future = publisher.publish(topic_path, data=b'Server Build',
                               server_name=name,
                               action=SERVER_ACTIONS.BUILD)
    print(future.result())
def server_build(server_name):
    """
    Builds an individual server based on the specification in the Datastore
    entity with name server_name.

    NOTE: another definition of server_build exists in this module; the one
    defined last in the file wins at import time.

    :param server_name: The Datastore entity name of the server to build
    :return: A boolean status on the success of the build
    """
    server = ds_client.get(ds_client.key('cybergym-server', server_name))
    build_id = server['workout']
    g_logger = log_client.logger(str(server_name))
    state_transition(entity=server, new_state=SERVER_STATES.BUILDING)
    # Work on a copy so metadata edits below do not mutate the stored config.
    config = server['config'].copy()
    """ Currently, we need a workaround to insert the guacamole startup script because of a 1500 character limit on indexed fields. The exclude_from_index does not work on embedded datastore fields """
    if 'student_entry' in server and server['student_entry']:
        config['metadata'] = {
            'items': [{
                "key": "startup-script",
                "value": server['guacamole_startup_script']
            }]
        }

    # Begin the server build and keep trying for a bounded number of additional 30-second cycles
    i = 0
    build_success = False
    while not build_success and i < 5:
        workout_globals.refresh_api()
        try:
            # NOTE(review): server['add_disk'] raises KeyError when the field
            # is absent from the entity — confirm every server carries it.
            if server['add_disk']:
                try:
                    image_config = {
                        "name": server_name + "-disk",
                        "sizeGb": server['add_disk'],
                        "type": "projects/" + project + "/zones/" + zone + "/diskTypes/pd-ssd"
                    }
                    response = compute.disks().insert(
                        project=project, zone=zone,
                        body=image_config).execute()
                    compute.zoneOperations().wait(
                        project=project, zone=zone,
                        operation=response["id"]).execute()
                except HttpError as err:
                    # If the disk already exists (i.e. a nuke), then ignore
                    if err.resp.status in [409]:
                        pass
            if server['build_type'] == BUILD_TYPES.MACHINE_IMAGE:
                source_machine_image = f"projects/{project}/global/machineImages/{server['machine_image']}"
                compute_beta = discovery.build('compute', 'beta')
                response = compute_beta.instances().insert(
                    project=project, zone=zone, body=config,
                    sourceMachineImage=source_machine_image).execute()
            else:
                # Optionally delay to stagger builds.
                if "delayed_start" in server and server["delayed_start"]:
                    time.sleep(30)
                response = compute.instances().insert(project=project,
                                                      zone=zone,
                                                      body=config).execute()
            build_success = True
            g_logger.log_text(
                f'Sent job to build {server_name}, and waiting for response')
        except BrokenPipeError:
            i += 1
        except HttpError as exception:
            # Unrecoverable API error: log and abort without a state change.
            cloud_log(
                build_id,
                f"Error when trying to build {server_name}: {exception.reason}",
                LOG_LEVELS.ERROR)
            return False
    i = 0
    success = False
    # NOTE(review): if the build loop above exhausted its retries without
    # succeeding, `response` is unbound here and the log line below raises
    # NameError — confirm this path cannot occur.
    while not success and i < 5:
        try:
            g_logger.log_text(
                f"Begin waiting for build operation {response['id']}")
            compute.zoneOperations().wait(project=project, zone=zone,
                                          operation=response["id"]).execute()
            success = True
        except timeout:
            i += 1
            g_logger.log_text('Response timeout for build. Trying again')
            pass
    if success:
        g_logger.log_text(f'Successfully built server {server_name}')
        state_transition(entity=server, new_state=SERVER_STATES.RUNNING,
                         existing_state=SERVER_STATES.BUILDING)
    else:
        g_logger.log_text(f'Timeout in trying to build server {server_name}')
        state_transition(entity=server, new_state=SERVER_STATES.BROKEN)
        return False

    # If this is a student entry server, register the DNS
    if 'student_entry' in server and server['student_entry']:
        g_logger.log_text(f'Setting DNS record for {server_name}')
        ip_address = register_student_entry(server['workout'], server_name)
        server['external_ip'] = ip_address
        ds_client.put(server)
        server = ds_client.get(ds_client.key('cybergym-server', server_name))

    # Now stop the server before completing
    g_logger.log_text(f'Stopping {server_name}')
    compute.instances().stop(project=project, zone=zone,
                             instance=server_name).execute()
    state_transition(entity=server, new_state=SERVER_STATES.STOPPED)

    # If no other servers are building, then set the workout to the state of READY.
    check_build_state_change(build_id=build_id,
                             check_server_state=SERVER_STATES.STOPPED,
                             change_build_state=BUILD_STATES.READY)
def build_guacamole_server(build, network, guacamole_connections):
    """
    Builds an image with an Apache Guacamole server and adds startup scripts
    to insert the correct users and connections into the guacamole database.
    This server becomes the entrypoint for all students in the arena.

    :param build: Build Entity for the workout or arena.
    :param network: The network name for the server
    :param guacamole_connections: An array of dictionaries for each student
        {workoutid, ip address of their server, and password for their server}.
    :return: Null
    """
    build_id = build.key.name
    # Nothing to build when there are no student connections.
    if len(guacamole_connections) == 0:
        return None
    startup_script = workout_globals.guac_startup_begin.format(
        guacdb_password=guac_password)
    i = 0
    for connection in guacamole_connections:
        # Get a PRNG password for the workout and store it with the datastore record for display on the workout controller
        guac_user = '******' + str(i + 1)
        guac_connection_password = get_random_alphaNumeric_string()
        workout = ds_client.get(
            ds_client.key('cybergym-workout', connection['workout_id']))
        workout['workout_user'] = guac_user
        workout['workout_password'] = guac_connection_password
        ds_client.put(workout)
        # Escape characters that the guacamole startup template treats
        # specially.
        # NOTE(review): '\$' is a literal backslash-dollar, but "\'" evaluates
        # to a plain apostrophe, making the second replace a no-op — likely
        # intended "\\'" — confirm against the startup SQL template.
        safe_password = connection['password'].replace('$', '\$')
        safe_password = safe_password.replace("'", "\'")
        startup_script += workout_globals.guac_startup_user_add.format(
            user=guac_user, name=guac_user,
            guac_password=guac_connection_password)
        if connection['entry_type'] == 'vnc':
            startup_script += workout_globals.guac_startup_vnc.format(
                ip=connection['ip'], connection=connection['workout_id'],
                vnc_password=safe_password)
        else:
            startup_script += workout_globals.guac_startup_rdp.format(
                ip=connection['ip'], connection=connection['workout_id'],
                rdp_username=connection['username'],
                rdp_password=safe_password,
                security_mode=connection['security-mode'])
        startup_script += workout_globals.guac_startup_join_connection_user
        i += 1
    startup_script += workout_globals.guac_startup_end
    server_name = "%s-%s" % (build_id, 'student-guacamole')
    tags = {'items': ['student-entry']}
    nics = [{
        "network": network,
        "subnet": "%s-%s" % (network, 'default'),
        "external_NAT": True
    }]
    meta_data = {"key": "startup-script", "value": startup_script}
    try:
        create_instance_custom_image(compute=compute, workout=build_id,
                                     name=server_name,
                                     custom_image=student_entry_image,
                                     machine_type='n1-standard-1',
                                     networkRouting=False, networks=nics,
                                     tags=tags, meta_data=meta_data,
                                     sshkey=None, student_entry=True)
        # Create the firewall rule allowing external access to the guacamole connection
        allow_entry = [{
            "name": "%s-%s" % (build_id, 'allow-student-entry'),
            "network": network,
            "targetTags": ['student-entry'],
            'protocol': None,
            'ports': ['tcp/80,8080,443'],
            'sourceRanges': ['0.0.0.0/0']
        }]
        create_firewall_rules(allow_entry)
    except errors.HttpError as err:
        # 409 error means the server already exists.
        if err.resp.status in [409]:
            pass
        else:
            raise
def server_build(server_name):
    """
    Builds an individual server based on the specification in the Datastore
    entity with name server_name.

    NOTE: this module defines server_build more than once; the definition
    that appears last in the file is the one in effect at import time.

    :param server_name: The Datastore entity name of the server to build
    :return: A boolean status on the success of the build
    """
    print(f'Building server {server_name}')
    server = ds_client.get(ds_client.key('cybergym-server', server_name))
    state_transition(entity=server, new_state=SERVER_STATES.BUILDING)

    # Commented because this is only for Fortinet right now.
    # if 'canIPForward' in server and server['config']['canIpForward']:
    #     image_config = {"name": server_name + "-disk", "sizeGb": 30,
    #                     "type": "projects/" + project + "/zones/" + zone + "/diskTypes/pd-ssd"}
    #     response = compute.disks().insert(project=project, zone=zone, body=image_config).execute()
    #     compute.zoneOperations().wait(project=project, zone=zone, operation=response["id"]).execute()

    # Begin the server build and keep trying for a bounded number of additional 30-second cycles
    i = 0
    build_success = False
    while not build_success and i < 5:
        workout_globals.refresh_api()
        try:
            response = compute.instances().insert(project=project, zone=zone,
                                                  body=server['config']).execute()
            build_success = True
            print(f'Sent job to build {server_name}, and waiting for response')
        except BrokenPipeError:
            i += 1
    i = 0
    success = False
    # NOTE(review): if the insert loop above exhausted its retries without
    # succeeding, `response` is unbound here and the print below raises
    # NameError — confirm this path cannot occur.
    while not success and i < 5:
        try:
            print(f"Begin waiting for build operation {response['id']}")
            compute.zoneOperations().wait(project=project, zone=zone,
                                          operation=response["id"]).execute()
            success = True
        except timeout:
            i += 1
            print('Response timeout for build. Trying again')
            pass
    if success:
        print(f'Successfully built server {server_name}')
        state_transition(entity=server, new_state=SERVER_STATES.RUNNING,
                         existing_state=SERVER_STATES.BUILDING)
    else:
        print(f'Timeout in trying to build server {server_name}')
        state_transition(entity=server, new_state=SERVER_STATES.BROKEN)
        return False

    # If this is a student entry server, register the DNS
    if 'student_entry' in server and server['student_entry']:
        print(f'Setting DNS record for {server_name}')
        ip_address = register_student_entry(server['workout'], server_name)
        server['external_ip'] = ip_address
        ds_client.put(server)
        server = ds_client.get(ds_client.key('cybergym-server', server_name))

    # Now stop the server before completing
    print(f'Stopping {server_name}')
    compute.instances().stop(project=project, zone=zone,
                             instance=server_name).execute()
    state_transition(entity=server, new_state=SERVER_STATES.STOPPED)

    # If no other servers are building, then set the workout to the state of READY.
    build_id = server['workout']
    check_build_state_change(build_id=build_id,
                             check_server_state=SERVER_STATES.STOPPED,
                             change_build_state=BUILD_STATES.READY)