def handler(event, context):
    """Step-function state: detach and release the Plex host's Elastic IP.

    Looks up any EIP currently associated with the EC2 instance id taken
    from the stack outputs. If one is found, it is disassociated and
    released; otherwise the event is marked as already released. Any AWS
    API failure is reported via ``ipReleaseStatus`` rather than raised, so
    the state machine can branch on it.
    """
    params = get_params()  # kept for its side effects; value unused here
    outputs = get_outputs()
    instance_filter = [{
        'Name': 'instance-id',
        'Values': [outputs['PlexEc2InstanceId']],
    }]
    try:
        described = client.describe_addresses(Filters=instance_filter)
        addresses = described['Addresses']
        if not addresses:
            event['ipReleaseStatus'] = 'IP_ALREADY_RELEASED'
        else:
            address = addresses[0]
            event['AssociationId'] = address['AssociationId']
            event['AllocationId'] = address['AllocationId']
            # Must disassociate before the allocation can be released.
            client.disassociate_address(AssociationId=event['AssociationId'])
            client.release_address(AllocationId=event['AllocationId'])
            event['ipReleaseStatus'] = 'IP_RELEASED'
    except ClientError:
        event['ipReleaseStatus'] = 'IP_RELEASE_ERROR'
    return event
def handler(event, context):
    """Use PlexApi to remove old plex ec2 servers and MyPlex devices.

    Signs in to the MyPlex account, then deletes every registered device
    (other than the one backing the current auth token) that is either an
    old ``plex-ec2`` server entry or a stale PlexAPI client entry whose
    name does not match the currently configured IP.

    Returns the (mutated) event on success, or a 500-style dict if the
    Plex sign-in fails.
    """
    params = get_params()
    # The device registered by the live server is named after its IP.
    device_name = params.get('plex-ip', 'noplexip')
    try:
        account = get_myplex_account()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception. Failure is reported
        # to the caller instead of raised.
        print("error signing in to plex account")
        return {
            'statusCode': 500,
            'body': '{"error": "error signing in to plex account"}',
        }
    for device in account.devices():
        # Never delete the device that owns the token we are using.
        if device.token == account.authenticationToken:
            continue
        if device.name == 'plex-ec2':
            device.delete()
            print("server --> removed old plex-ec2 server instance:",
                  device.name, device.product, device.platform)
        # NOTE(review): a device can match both branches (name 'plex-ec2'
        # and product 'PlexAPI'), causing a second delete() on the same
        # device — preserved as-is; confirm the API tolerates it.
        if (device.name != device_name) and (device.product == 'PlexAPI'):
            device.delete()
            print("removed old plex-ec2 api instance:",
                  device.name, device.product, device.platform)
    event['cleaned'] = True
    return event
def run(config):
    """Drive the full MemN2N pipeline from a parsed config.

    Builds the bAbI data iterators, constructs the model on GPU when
    available, optionally restores a saved checkpoint, optionally trains
    (saving the resulting weights), and finally evaluates on the test set.
    """
    print("#! preparing data...")
    train_iter, valid_iter, test_iter, vocab = dataloader(
        config.batch_size, config.memory_size, config.task,
        config.joint, config.tenk)

    print("#! instantiating model...")
    use_cuda = torch.cuda.is_available()
    model = MemN2N(get_params(config), vocab).to(
        torch.device("cuda" if use_cuda else "cpu"))

    if config.file:
        # Remap checkpoint tensors onto the device we are running on.
        if use_cuda:
            remap = lambda storage, loc: storage.cuda()
        else:
            remap = lambda storage, loc: storage
        checkpoint = os.path.join(config.save_dir, config.file)
        with open(checkpoint, 'rb') as f:
            model.load_state_dict(torch.load(f, map_location=remap))

    if config.train:
        print("#! training...")
        optimizer = optim.Adam(model.parameters(), config.lr)
        train(train_iter, model, optimizer, config.num_epochs,
              config.max_clip, valid_iter)
        if not os.path.isdir(config.save_dir):
            os.makedirs(config.save_dir)
        torch.save(model.state_dict(),
                   os.path.join(config.save_dir, get_fname(config)))

    print("#! testing...")
    with torch.no_grad():
        # `eval` here is the project's evaluation routine (shadows the builtin).
        eval(test_iter, model, config.task)
def handler(event, context):
    """Step-function state: ensure the Plex EC2 host has an Elastic IP.

    If the instance already has an address associated, only the status is
    set. Otherwise a new EIP is allocated, associated with the instance,
    and persisted to the SSM parameter ``/plex-ec2/plex-ip`` so other
    functions can discover it. Any AWS API failure is reported via
    ``ipAttachStatus`` rather than raised.
    """
    params = get_params()
    outputs = get_outputs()
    instance_id = outputs['PlexEc2InstanceId']
    print('plex-ip', params['plex-ip'])
    print('outputs', instance_id)
    try:
        described = client.describe_addresses(Filters=[{
            'Name': 'instance-id',
            'Values': [instance_id],
        }])
        if described['Addresses']:
            event['ipAttachStatus'] = 'IP_ALREADY_ATTACHED'
        else:
            allocated = client.allocate_address()
            print('allocate_response', allocated)
            associated = client.associate_address(
                AllocationId=allocated['AllocationId'],
                InstanceId=instance_id)
            print('associate_response', associated)
            # Record the new public IP where the rest of the stack reads it.
            ssm_client.put_parameter(
                Name='/plex-ec2/plex-ip',
                Value=allocated['PublicIp'],
                Type='String',
                Overwrite=True,
            )
            event['AssociationId'] = associated['AssociationId']
            event['AllocationId'] = allocated['AllocationId']
            event['PlexIp'] = allocated['PublicIp']
            event['ipAttachStatus'] = 'IP_ATTACHED'
    except ClientError:
        event['ipAttachStatus'] = 'IP_ATTACH_ERROR'
    return event
import hoomd, hoomd.md import hoomd.deprecated as d import numpy.random as rand import sys import helpers maxT = 1e8 dcdPeriod = 1e6 msdPeriod = 1e5 pppmGrid = 64 pppmOrder = 6 rcut = 2.5 dt = 0.001 mix, q, e, T, nr, cc_eps = helpers.get_params(int(sys.argv[1])) jobname = helpers.make_name(mix, q, e, T, nr, cc_eps) mfile, ifile, tfile, efile, lfile = helpers.make_filenames(jobname) hoomd.context.initialize() uc = hoomd.lattice.unitcell( N=2, #two types of particles a1=[3, 0, 0], #unit cell axes a2=[ 0, 3, 0, ], a3=[0, 0, 3], dimensions=3, position=[[0, 0, 0], [0, 1.5, 0]], #xyz coords of the N particles type_name=['R', 'C'],
print(TOPICS)
# One JSON dump per (year, topic, quarter); existing files are never refetched.
for year in (2018, 2019, 2020):
    print("Handling year: {}".format(year))
    for topic in TOPICS:
        topic_path = make_path(downloads, topic.name)
        print("Handling topic: {}".format(topic))
        for quarter, (_from, _to) in enumerate(get_quarters(year), start=1):
            quarter_path = os.path.join(
                topic_path, "{}_Q{}.json".format(year, quarter))
            if os.path.isfile(quarter_path):
                print("{} exists, skipping".format(quarter_path))
                continue
            print(quarter_path)
            print("Topic: {}, {}-{}".format(topic.name, _from, _to))
            # cursor does not need to be implemented yet
            PARAMS = get_params(None, _from, _to, topic.ids, first=125)
            # 100 each Q, 400 a year, 4 topics is 1600 per year
            # 2 years: 3200 + 1quarter in 2020: 3600 total docs
            res = client.execute(QUERY, PARAMS)
            with open(quarter_path, "w") as tmp:
                tmp.write(json.dumps(res))
            # page_info = res["events"]["pageInfo"]
            # print(json.dumps(page_info))
# Paginated variant: fetch each quarter in QUARTER_ITERATIONS pages of
# EVENTS_PER_REQUEST events, threading a GraphQL cursor between requests.
# NOTE(review): relies on `topic`, `topic_path` and `year` from an
# enclosing loop not visible here.
quarter = 0
print("Handling topic: {}".format(topic))
for _from, _to in get_quarters(year):
    quarter += 1
    # Cursor is reset per quarter; each page request resumes from the
    # cursor captured out of the previous page's response.
    start_cursor = None
    for cursor_count in range(QUARTER_ITERATIONS):
        Q = "{}_Q{}_{}.json".format(year, quarter, cursor_count)
        quarter_path = os.path.join(topic_path, Q)
        if os.path.isfile(quarter_path):
            # NOTE(review): skipping an existing page leaves start_cursor
            # stale (None or from an earlier page), so a partially
            # downloaded quarter may refetch overlapping events — confirm
            # whether resumption is expected to work here.
            print("{} exists, skipping".format(quarter_path))
            continue
        print(quarter_path)
        print("Topic: {}, {}-{}".format(topic.name, _from, _to))
        # cursor does not need to be implemented yet
        PARAMS = get_params(start_cursor, _from, _to, topic.ids,
                            first=EVENTS_PER_REQUEST)
        print(PARAMS)
        # 100 each Q, 400 a year, 4 topics is 1600 per year
        # 2 years: 3200 + 1quarter in 2020: 3600 total docs
        res = client.execute(QUERY, PARAMS)
        try:
            # NOTE(review): this takes the FIRST edge's cursor; forward
            # pagination usually advances from the last edge (or
            # pageInfo.endCursor) — verify against the GraphQL schema.
            start_cursor = str(res["events"]["edges"][0]["cursor"])
        except Exception as e:
            # Malformed/empty page: log it and skip writing this file.
            print("ERR", e)
            print(PARAMS)
            print(quarter_path)
            continue
        with open(quarter_path, "w") as tmp:
            tmp.write(json.dumps(res))
os.mkdir(path) return path downloads = make_path(os.getcwd(), 'downloads_evaluation') # we want approx 2000 labeled articles in total. # divided by 4 topics: 500 each # 4 quarters: 125 each quarter print(TOPICS) year = 2020 april_start, april_end = get_isodate(4, 4, 2020) print(april_start, april_end) for topic in TOPICS: topic_path = make_path(downloads, topic.name) quarter = 0 _file = "{}.json".format(year) _path = os.path.join(topic_path, _file) if os.path.isfile(_path): print("{} exists, skipping".format(_path)) continue print(_path) PARAMS = get_params(None, april_start, april_end, topic.ids, first=25) res = client.execute(QUERY, PARAMS) with open(_path, "w") as tmp: tmp.write(json.dumps(res))