def treatment(path_to='../files/input/'):
    """Load the raw csv files, merge/clean them and export train/test csvs.

    Parameters:
        path_to - folder containing train.csv, features.csv, stores.csv
                  and test.csv

    Side effects:
        Writes '../files/output/regression.data.csv' and
        '../files/output/test.csv'.
    """
    # Build data frames from the input csv files
    _train = h.get_data(path_to + 'train.csv')
    _features = h.get_data(path_to + 'features.csv')
    _stores = h.get_data(path_to + 'stores.csv')
    _test = h.get_data(path_to + 'test.csv')

    # Replace the store type letters with numeric codes so classification
    # algorithms do not have to deal with string categories
    _stores['Type'] = _stores['Type'].map({'A': 0, 'B': 1, 'C': 2})

    # Merge the frames so each resulting frame holds all relevant columns
    _data = h.merge_data(_features, _stores, 'Store')
    _data = h.merge_data(_data, _train, ['Store', 'Date'])
    _test = h.merge_data(_features, _test, ['Store', 'Date'])
    _test = h.merge_data(_stores, _test, 'Store')

    # Fix the duplicated IsHoliday column produced by the merge and
    # convert the boolean flag to a numeric one
    _data = _data.drop('IsHoliday_y', axis=1)
    _data = _data.rename(columns={'IsHoliday_x': 'IsHoliday'})
    _data['IsHoliday'] = _data['IsHoliday'].map({False: 0, True: 1})
    _test = _test.drop('IsHoliday_y', axis=1)
    _test = _test.rename(columns={'IsHoliday_x': 'IsHoliday'})
    _test['IsHoliday'] = _test['IsHoliday'].map({False: 0, True: 1})

    # Derive week-of-year and year from the Date column
    _data['week_number'] = _data.apply(
        lambda row: h.calculate_week(row['Date']), axis=1)
    _data['year'] = _data.apply(
        lambda row: h.calculate_year(row['Date']), axis=1)
    _test['week_number'] = _test.apply(
        lambda row: h.calculate_week(row['Date']), axis=1)
    _test['year'] = _test.apply(
        lambda row: h.calculate_year(row['Date']), axis=1)

    # Drop unused columns in one call instead of one drop per column
    unused = ['Date', 'Markdown1', 'Markdown2', 'Markdown3',
              'Markdown4', 'Markdown5']
    _data = _data.drop(unused, axis=1)
    _test = _test.drop(unused, axis=1)

    # BUG FIX: DataFrame.set_index returns a NEW frame; the original
    # discarded the result, so the frames were never actually indexed.
    # Assign the result back so the exported csvs carry the index.
    _data = _data.set_index(['Store', 'Dept', 'year', 'week_number'])
    _test = _test.set_index(['Store', 'Dept', 'year', 'week_number'])

    # Replace empty strings with NaN so they are treated as missing values
    _data = _data.replace('', np.nan)
    _test = _test.replace('', np.nan)

    # Export the cleaned frames to csv files
    _data.to_csv('../files/output/regression.data.csv')
    _test.to_csv('../files/output/test.csv')
def merge(path_wx, path_zfb, rules_list, save_path,
          start='2020/6/1 0:0:0', end='2020/7/1 0:0:0'):
    """Merge a WeChat bill and an Alipay bill into a single excel report.

    Parameters:
        path_wx - path to the WeChat bill csv (header on line 16, utf-8)
        path_zfb - path to the Alipay bill csv (header on line 4, gbk)
        rules_list - iterable of (wx_col, zfb_col, merged_col, dtype) rules
        save_path - output .xlsx path
        start, end - inclusive-exclusive time window for kept records

    Side effects:
        Prints summary statistics and writes the merged bill to save_path.
    """
    wx, zfb, hb, dt = list(zip(*rules_list))
    data_wx = get_data(path_wx, start_line=16, encoding='utf-8')
    data_zfb = get_data(path_zfb, start_line=4, encoding='gbk')
    # Normalise column dtypes per source, then rename to the merged schema
    data_wx = transaction_checking_wx(data_wx, {k: v for k, v in zip(wx, dt)})
    data_zfb = transaction_checking_zfb(data_zfb, {k: v for k, v in zip(zfb, dt)})
    data_wx = extract_and_rename(data_wx, hb, {k: v for k, v in zip(wx, hb)})
    data_zfb = extract_and_rename(data_zfb, hb, {k: v for k, v in zip(zfb, hb)})
    data = pd.concat([data_wx, data_zfb], axis=0, sort=False)  # merge the two bills
    data = data.sort_values(by='交易时间')  # sort by transaction time
    # BUG FIX: DataFrame.ix is deprecated and removed in modern pandas;
    # use .loc with a boolean mask to filter records inside [start, end]
    data = data.loc[(data['交易时间'] > start) & (data['交易时间'] < end)]
    print_data_info(data, '合并后账单')
    print('\n{}至{}期间数据统计:'.format(start, end))
    data_analysis(data)
    data.to_excel(save_path, encoding='gbk')
def user_profile_setemail(token, email):
    '''
    Update the authorised user's email address

    Parameters:
        token - The user's token that was generated from their user id
        email - The email the user wants to change to

    Returns:
        An empty dictionary

    Errors:
        InputError:
            The email is an invalid email
            The email has already been taken by another user
    '''
    check_token(token)
    data = get_data()
    # Reject malformed addresses up front
    if not check(email):
        raise InputError(description="Invalid email")
    # Reject addresses already owned by any account
    if any(account['email'] == email for account in data['users']):
        raise InputError(description="Email is already taken")
    # Locate the caller's record and store the new address
    caller_id = get_user_from_token(token)
    for account in data['users']:
        if account['u_id'] == caller_id:
            account['email'] = email
    return {}
def channels_listall(token):
    '''
    Provide a list of all channels (and their associated details)

    Parameters:
        token - The user's token that was generated from their user id

    Returns:
        A dictionary containing a list of channels that are public
        or the user is in

    Errors:
    '''
    check_token(token)
    data = get_data()
    caller_id = get_user_from_token(token)
    # Keep every channel that is public or that the caller belongs to
    visible = [
        {'channel_id': chan['channel_id'], 'name': chan['name']}
        for chan in data['channels']
        if find_id_in_list(chan['all_members'], caller_id, 'u_id')
        or chan['is_public']
    ]
    return {'channels': visible}
def get_products(access_key, page, per_page, where, where_not, order, q, store_id):
    """Query the products endpoint, forwarding only the filters that are set.

    Any falsy argument is omitted from the request payload, matching the
    behaviour of the per-argument `if` checks it replaces.

    Returns a ProductList built from the response's 'result' field.
    """
    candidates = (
        ('page', page),
        ('per_page', per_page),
        ('where', where),
        ('where_not', where_not),
        ('order', order),
        ('q', q),
        ('store_id', store_id),
    )
    params = {name: value for name, value in candidates if value}
    response = helper.get_data('products', params, access_key)
    return ProductList(response['result'])
def channel_leave(token, channel_id):
    '''
    User leaves a channel.

    Parameters:
        token - The user's token that was generated from their user id
        channel_id - The id of the channel to leave

    Returns:
        {}

    Errors:
        InputError: Invalid channel id.
        AccessError: User is not inside channel.
    '''
    check_token(token)
    data = get_data()
    if not find_id_in_list(data['channels'], channel_id, 'channel_id'):
        raise InputError(description='Channel does not exist')
    channel_record = data['channels'][channel_id - 1]
    leaver_id = get_user_from_token(token)
    # Guard clause: the authorised user must be a member of the channel
    if not find_id_in_list(channel_record['all_members'], leaver_id, 'u_id'):
        raise AccessError(
            description='The authorised user is not a member of this channel')
    leaver = get_user_data(leaver_id)
    channel_record['all_members'].remove({
        'u_id': leaver['u_id'],
        'name_first': leaver['name_first'],
        'name_last': leaver['name_last'],
    })
    return {}
def user_profile(token, u_id):
    '''
    For a valid user, returns information about their user id, email,
    first name, last name, handle, and image profile URL

    Parameters:
        token - The user's token that was generated from their user id
        u_id - The profile id number to be checked

    Returns:
        A dictionary containing all the user's information

    Errors:
        InputError: Given a u_id for a non-existent user
    '''
    check_token(token)
    data = get_data()
    # Find the first (only) account with this u_id, or None
    account = next((u for u in data['users'] if u['u_id'] == u_id), None)
    if account is None:
        # If u_id hasn't been found then it is obviously invalid
        raise InputError(description="Invalid u_id")
    return {
        'user': {
            'u_id': u_id,
            'email': account['email'],
            'name_first': account['name_first'],
            'name_last': account['name_last'],
            'handle_str': account['handle_str'],
            'profile_img_url': account['profile_img_url'],
        }
    }
def user_remove():
    '''Admin removes a user from slackr.

    Reads 'token' and 'u_id' from the request JSON body.

    Returns:
        A JSON-encoded empty dictionary.

    Errors:
        InputError: no user with the given u_id exists
        AccessError: the caller is not an owner of slackr
    '''
    data = get_data()
    payload = request.get_json()
    token = payload['token']
    u_id = int(payload['u_id'])
    check_token(token)
    user_data = get_user_data(u_id)
    if user_data == {}:
        raise InputError(description='No such user exists')
    person_u_id = get_user_from_token(token)
    person_data = get_user_data(person_u_id)
    # Only slackr owners (permission_id == 1) may remove users
    if person_data['permission_id'] != 1:
        raise AccessError(description='The authorised person is not an owner of slackr')
    user_info = {
        'u_id': u_id,
        'name_first': user_data['name_first'],
        'name_last': user_data['name_last'],
    }
    # Strip the user from the member lists of every channel they belong to
    for channel in data['channels']:
        if user_info in channel['owner_members']:
            curr_channel_id = channel['channel_id']
            data['channels'][curr_channel_id - 1]['owner_members'].remove(user_info)
            data['channels'][curr_channel_id - 1]['all_members'].remove(user_info)
        elif user_info in channel['all_members']:
            curr_channel_id = channel['channel_id']
            data['channels'][curr_channel_id - 1]['all_members'].remove(user_info)
    # BUG FIX: the original computed dumps({}) and discarded it; a Flask
    # route must return the response body, otherwise the view returns None
    return dumps({})
def test_start_game_success():
    """Starting a hangman game in a fresh channel marks the session active."""
    reset_data()
    state = get_data()
    registered = auth_register('*****@*****.**', 'ilovecse', 'Jiaqi', 'Zhu')
    created = channels_create(registered['token'], 'test channel 1', True)
    start_game(registered['token'], created['channel_id'])
    assert state['hangman'][0]['is_active'] is True
def user_profile_setname(token, name_first, name_last):
    '''
    Update the authorised user's first and last name

    Parameters:
        token - The user's token that was generated from their user id
        name_first - The first name the user wants to change to
        name_last - The last name the user wants to change to

    Returns:
        An empty dictionary

    Errors:
        InputError:
            Either name is greater than 50 characters
            Either name is less than 1 character
    '''
    check_token(token)
    data = get_data()
    # Checking if the name is too long or short
    # (fixed the "characer" typo in the original error message)
    if len(name_first) > 50 or len(name_last) > 50:
        raise InputError(description="Name is too long. Maximum character length 50.")
    if len(name_first) < 1 or len(name_last) < 1:
        raise InputError(description="Name is too short. Minimum character length 1.")
    # Setting the name on the caller's record
    u_id = get_user_from_token(token)
    for user in data['users']:
        if user['u_id'] == u_id:
            user['name_first'] = name_first
            user['name_last'] = name_last
    return {}
def main():
    """Load flat image vectors, reshape them into square images and return
    them together with labels and the chosen training function."""
    data = helper.get_data()
    # Each flat vector is a square image; recover the side length
    side = int(len(data[0]) ** 0.5)
    data = data.reshape(len(data), side, side)
    label = helper.get_label()
    fn = choice[parse_args().train]
    return data, label, fn
def test_success_get_messages():
    '''Messages come back newest-first, each with its own creation time.'''
    reset_data()
    owner = auth_register("*****@*****.**", "avengers", "Natasha", "Romanova")
    test_channel = channels.channels_create(owner['token'], "Basement", True)
    u_id = owner['u_id']
    channel_id = test_channel['channel_id']
    message_1 = message_send(owner['token'], channel_id, "Hello World")
    message_2 = message_send(owner['token'], channel_id, "Yes Please")
    data = get_data()
    # BUG FIX: the original indexed data['messages'][message_id - 1], but
    # messages are inserted at the HEAD of the list, so list position does
    # not correspond to message_id (it only worked here by coincidence).
    # Look the timestamps up by message_id instead.
    times = {m['message_id']: m['time_created'] for m in data['messages']}
    assert channel.channel_messages(owner['token'], channel_id, 0) == {
        'messages': [{
            'message_id': message_2['message_id'],
            'u_id': u_id,
            'message': 'Yes Please',
            'time_created': times[message_2['message_id']]
        }, {
            'message_id': message_1['message_id'],
            'u_id': u_id,
            'message': 'Hello World',
            'time_created': times[message_1['message_id']]
        }],
        'start': 0,
        'end': -1,
    }
def channel_join(token, channel_id):
    '''
    User joins a channel.

    Parameters:
        token - The caller's auth token
        channel_id - The id of the channel to join

    Returns:
        {}

    Errors:
        InputError: Invalid channel id.
        AccessError: Channel is private.
    '''
    check_token(token)
    data = get_data()
    # Channel ID is not a valid channel
    if not find_id_in_list(data['channels'], channel_id, 'channel_id'):
        raise InputError(description='Channel does not exist')
    joiner_id = get_user_from_token(token)
    joiner = get_user_data(joiner_id)
    channel_record = data['channels'][channel_id - 1]
    # Only slackr owners (permission_id 1) may enter a private channel
    if not (channel_record['is_public'] or joiner['permission_id'] == 1):
        raise AccessError(description='Private channel')
    channel_record['all_members'].append({
        'u_id': joiner['u_id'],
        'name_first': joiner['name_first'],
        'name_last': joiner['name_last'],
    })
    return {}
def main():
    """Load the configured data file and run append() once plainly, then
    once per digit suffix 0-9."""
    args = arg.preprocess_args()
    data = helper.get_data(folder=args.folder, data_file=args.data)
    print('data_loaded')
    append(data, args)
    for suffix in map(str, range(10)):
        append(data, args, suffix, '_')
def standup_active(token, channel_id):
    '''
    For a given channel, return whether a standup is active in it, and what
    time the standup finishes. If no standup is active, then time_finish
    returns None

    Parameters:
        token - The user's token that was generated from their user id
        channel_id - The id of the channel

    Returns:
        A dictionary with 'is_active' and 'time_finish'

    Errors:
        InputError: Channel ID is not a valid channel
    '''
    check_token(token)
    data = get_data()
    # Check if channel_id is valid
    if not find_id_in_list(data['channels'], channel_id, 'channel_id'):
        raise InputError(description="Channel ID is not a valid channel")
    now = datetime.utcnow()
    now_stamp = int(now.replace(tzinfo=timezone.utc).timestamp())
    standup = data['standup']
    # Active only when this channel owns the standup and it has not expired
    active = standup['channel_id'] == channel_id and now_stamp <= standup['time_finish']
    return {
        'is_active': active,
        'time_finish': standup['time_finish'] if active else None,
    }
def users_all(token):
    '''
    Returns a list of all users and their associated details

    Parameters:
        token - The user's token that was generated from their user id

    Returns:
        A dictionary containing a list of users

    Errors:
    '''
    check_token(token)
    data = get_data()
    # Project each stored account down to its public profile fields
    fields = ('u_id', 'email', 'name_first', 'name_last',
              'handle_str', 'profile_img_url')
    profiles = [{name: account[name] for name in fields}
                for account in data['users']]
    return {'users': profiles}
def user_profile_sethandle(token, handle_str):
    '''
    Update the authorised user's handle (i.e. display name)

    Parameters:
        token - The user's token that was generated from their user id
        handle_str - The handle the user wants to change to

    Returns:
        An empty dictionary

    Errors:
        InputError:
            The handle name is greater than 20 characters
            The handle name is less than 2 characters
            The handle is already taken by another user
    '''
    check_token(token)
    data = get_data()
    # Checking if the handle is too long or short
    # (fixed the "characer" typo in the original error message)
    if len(handle_str) > 20:
        raise InputError(description="Handle is too long. Maximum character length 20.")
    if len(handle_str) < 2:
        raise InputError(description="Handle is too short. Minimum character length 2.")
    # Check if handle is already taken by another user
    for user in data['users']:
        if user['handle_str'] == handle_str:
            raise InputError(description="Handle is already taken")
    # Setting the handle on the caller's record
    u_id = get_user_from_token(token)
    for user in data['users']:
        if user['u_id'] == u_id:
            user['handle_str'] = handle_str
    return {}
def test_guess_word():
    """Exhausting every try without completing the word ends the game with
    the losing message (word 'hello', 10 tries)."""
    reset_data()
    data = get_data()
    user = auth_register('*****@*****.**', 'ilovecse', 'Jiaqi', 'Zhu')
    test_channel = channels_create(user['token'], 'test channel 1', True)
    # Seed an active hangman session directly into the data store
    hangman_package = {
        'channel_id': test_channel['channel_id'],
        'is_active': True,
        'guessed': False,
        'word': 'hello',
        'letters_guessed': [],
        'tries': 10
    }
    data['hangman'].append(hangman_package)
    # A run of guesses (mostly wrong for 'hello') that burns through the
    # available tries before the final guess below
    check_word(user['token'], 'a', 1)
    check_word(user['token'], 'b', 1)
    check_word(user['token'], 'c', 1)
    check_word(user['token'], 'e', 1)
    check_word(user['token'], 'd', 1)
    check_word(user['token'], 'o', 1)
    check_word(user['token'], 't', 1)
    check_word(user['token'], 'n', 1)
    check_word(user['token'], 'k', 1)
    check_word(user['token'], 'm', 1)
    check_word(user['token'], 'p', 1)
    # The final wrong guess must produce the full game-over message,
    # including the ASCII-art gallows
    assert check_word(user['token'], 'q', 1) == '\n_e__o\nYou have run out of guesses and you haven\'t guessed the word. :(\nThe word is hello.\n_ _ _ \n |' + ' ' + ' | \n | O \n | /|\ \n | / \ \n|_ \n'
def get_product(access_key, product_id, json_flag):
    """Fetch a single product by id.

    Returns the raw response pretty-printed as JSON when json_flag is
    truthy, otherwise a Product built from the response's 'result' field.
    """
    payload = helper.get_data('products/' + product_id, {}, access_key)
    # Build the Product first (as the original did) so construction errors
    # surface regardless of json_flag
    item = Product(payload['result'])
    if json_flag:
        return json.dumps(payload, indent=4)
    return item
def test_pin():
    """Pinning a freshly sent message flags it as pinned."""
    reset_data()
    admin = auth.auth_register("*****@*****.**", "hellocse1", "Sinha", "Nawa")
    channels.channels_create(admin['token'], "New Channel", True)
    message.message_send(admin['token'], 1, "test")
    message.message_pin(admin['token'], 1)
    store = get_data()
    pinned = store['messages'][0]
    assert pinned['is_pinned'] == 1
def test_react():
    """The sender reacting to their own message records the react."""
    reset_data()
    admin = auth.auth_register("*****@*****.**", "hellocse1", "Sinha", "Nawa")
    channels.channels_create(admin['token'], "New Channel", True)
    message.message_send(admin['token'], 1, "test")
    message.message_react(admin['token'], 1, 1)
    store = get_data()
    reacted = store['messages'][0]
    assert reacted['react'] == [{'react_id': 1, 'u_id': 1}]
def server_side_event(scheduled=True, supplierID=None):
    """Publish a server-sent 'newOrder' event on the supplier's channel.

    Parameters:
        scheduled - True when invoked by the scheduler rather than an
                    explicit trigger (only changes the log line printed)
        supplierID - id used to build the SSE channel name
    """
    with app.app_context():
        channel = f"supplierID_{supplierID}"
        print(channel)
        # NOTE(review): get_data() appears to return a fresh generator each
        # call, so next() always yields its first item — confirm this is
        # intended rather than resuming a shared generator
        sse.publish(next(get_data()), type='newOrder', channel=channel)
        if scheduled:
            print("Event Scheduled at ", datetime.datetime.now())
        else:
            print(f"Event triggered for channel=supplierID_{supplierID} at ", datetime.datetime.now())
def message_sendlater(token, channel_id, message, time_sent):
    '''
    Send a message from authorised_user to the channel specified by
    channel_id automatically at a specified time in the future

    Parameters:
        token - The user's token that was generated from their user id
        channel_id - The id of the channel the user wishes to message
        message - The message the user wishes to send
        time_sent - unix timestamp at which the message should appear

    Returns:
        A dictionary containing the new message_id

    Errors:
        InputError:
            Channel ID is not a valid channel
            Message is more than 1000 characters
            Time sent is a time in the past
        AccessError:
            The authorised user has not joined the channel they are
            trying to post to
    '''
    check_token(token)
    data = get_data()
    # check channel exists
    if not find_id_in_list(data['channels'], channel_id, 'channel_id'):
        raise InputError(description="Channel does not exist")
    if len(message) > 1000:
        raise InputError(description="Message is too long")
    curr_time = datetime.utcnow()
    time_stamp = int(time.mktime(curr_time.timetuple()))
    if time_sent < time_stamp:
        raise InputError(description="Cannot send messages in the past")
    curr_channel = data['channels'][int(channel_id) - 1]
    u_id = get_user_from_token(token)
    if not find_id_in_list(curr_channel['all_members'], u_id, 'u_id'):
        raise AccessError("Cannot send messages in channels you're not in")
    message_id = get_max_msg_id() + 1
    new_message = {
        'u_id': u_id,
        'channel_id': channel_id,
        'message_id': message_id,
        'message': message,
        'time_created': time_sent,
        'send_later': True,
        'react': [],
        'is_pinned': False
    }
    # Recompute "now" so the delay accounts for the checks above
    curr_time = datetime.utcnow()
    time_stamp = int(time.mktime(curr_time.timetuple()))
    # BUG FIX: the original passed data['messages'].insert(0, new_message)
    # as the Timer callback, which CALLS insert immediately and hands the
    # Timer its None return value. Pass the bound method and its arguments
    # separately so the insert happens when the timer fires.
    timer = threading.Timer(time_sent - time_stamp,
                            data['messages'].insert, args=(0, new_message))
    timer.start()
    return {"message_id": new_message['message_id']}
def auth_password_reset(reset_code, new_password):
    '''Reset a user's password given an active reset code.'''
    data = get_data()
    # Enforce the minimum password length before touching any account
    if len(new_password) < 6:
        raise InputError("Invalid password, must be longer")
    # Find the account whose active reset code matches
    matching = (acc for acc in data['users'] if acc['reset_code'] == reset_code)
    for account in matching:
        account['password'] = str(hash_password(new_password))
        return {}
    raise InputError("Reset code incorrect")
def auth_logout(token):
    '''Log the authorised user out by clearing their login flag.

    Returns {'is_success': True} when the user was logged in and is now
    logged out, {'is_success': False} otherwise.
    '''
    check_token(token)
    data = get_data()
    caller_id = get_user_from_token(token)
    for account in data['users']:
        # Clear the login flag (1 -> 0) for the matching, logged-in user
        if account['u_id'] == caller_id and account['logged_in'] == 1:
            account['logged_in'] = 0
            return {'is_success': True}
    return {'is_success': False}
def main():
    """Train one sub-model per character position of the captcha code.

    For each position `index`, restore that position's saved TensorFlow
    graph, then run training batches until either max_train_time steps have
    run or the rolling mean accuracy over the last stat_length batches
    reaches accuracy_level.

    NOTE: Python 2 code (print statement, "rU" file mode, lazy-free map)
    using the TF1 Session API.
    """
    for i, index in enumerate(range(code_length), 1):
        model_path = 'model/%s/' % index
        model_file_name = os.path.join(model_path, 'model')
        nodes_file_name = os.path.join(model_path, 'nodes.pk')
        # Create the model on first use for this position
        if not os.path.exists(nodes_file_name):
            create_model(model_path)
        # Rolling window of recent batch accuracies for the stop criterion
        recent_accuracy = deque(maxlen=stat_length)
        graph = tf.Graph()
        config = tf.ConfigProto(intra_op_parallelism_threads=cpu_to_use)
        session = tf.Session(graph=graph, config=config)
        with session.graph.as_default():
            # Import the model definition and restore its weights
            saver = tf.train.import_meta_graph(model_file_name + '.meta')
            saver.restore(session, model_file_name)
            # Tensor/op names were pickled when the model was created
            nodes = pickle.load(open(nodes_file_name, "rU"))
            x = session.graph.get_tensor_by_name(nodes['x'])
            y = session.graph.get_tensor_by_name(nodes['y'])
            keep_prob = session.graph.get_tensor_by_name(nodes['keep_prob'])
            loss = session.graph.get_tensor_by_name(nodes['loss'])
            accuracy = session.graph.get_tensor_by_name(nodes['accuracy'])
            optimizer = session.graph.get_operation_by_name(nodes['optimizer'])
            # Train the model
            for j, step in enumerate(range(max_train_time), 1):
                begin_time = datetime.now()
                imageList, codeList = get_data(100)
                # Keep only the character at the current position
                # (the lambda's `x` shadows the tensor only inside the lambda)
                codeList = map(lambda x: x[index], codeList)
                x_data = map(image_to_vector, imageList)
                y_data = map(code_to_vector, codeList)
                _, l, a = session.run([optimizer, loss, accuracy], feed_dict={
                    x: x_data,
                    y: y_data,
                    keep_prob: .75
                })
                # Checkpoint every 10 steps
                if step % 10 == 0:
                    saver.save(session, model_file_name)
                end_time = datetime.now()
                dt = end_time - begin_time
                recent_accuracy.append(a)
                mean_of_accuracy = pd.Series(recent_accuracy).mean()
                format_string = '[%d(%d/%d): %d/%d]: loss: %.2f, accuracy: %.2f, accuracy mean: %.2f(<%.2f?), time: %.2f'
                print format_string % (index, i, code_length, j, max_train_time, l, a, mean_of_accuracy, accuracy_level, dt.total_seconds())
                # Stop early once the window is full and accurate enough
                if len(recent_accuracy) == stat_length:
                    if mean_of_accuracy >= accuracy_level:
                        break
def test_another_react():
    """A member other than the sender can react to the owner's message."""
    reset_data()
    chan_owner = auth.auth_register("*****@*****.**", "hellocse1", "Sinha", "Nawa")
    invitee = auth.auth_register("*****@*****.**", "hellocse", "jiaqi", "zhu")
    channels.channels_create(chan_owner['token'], "New Channel", True)
    channel.channel_invite(chan_owner['token'], 1, invitee['u_id'])
    message.message_send(chan_owner['token'], 1, "test")
    message.message_react(invitee['token'], 1, 1)
    store = get_data()
    reacted = store['messages'][0]
    assert reacted['react'] == [{'react_id': 1, 'u_id': 2}]
def user_profile_uploadphoto(token, img_url, x_start, y_start, x_end, y_end):
    '''
    Given a URL of an image on the internet, crops the image within bounds
    (x_start, y_start) and (x_end, y_end). Position (0,0) is the top left

    Parameters:
        token - The user's token that was generated from their user id
        img_url - The url of the image to upload as the profile picture
        x_start, y_start - top-left corner of the crop box
        x_end, y_end - bottom-right corner of the crop box

    Returns:
        An empty dictionary

    Errors:
        InputError:
            The img_url returns a non 200 response
            The image at the url is not a JPG
            A start value is greater than or equal to its end value
            A crop value is outside the bounds of the photo
    '''
    check_token(token)
    data = get_data()
    if not os.path.exists('static/images'):
        os.makedirs('static/images')
    # Random file name so concurrent uploads never collide
    filename = 'images/' + uuid.uuid4().hex + '.jpg'
    response = requests.get(img_url)
    if not response.status_code == 200:
        raise InputError(description="img_url does not return a 200 status code")
    urllib.request.urlretrieve(img_url, 'static/' + filename)
    img = Image.open(BytesIO(response.content))
    # BUG FIX: ('JPEG' or 'JPG' or ...) evaluates to just 'JPEG', so the
    # original only ever compared against that one spelling; test
    # membership in the tuple instead
    if img.format not in ('JPEG', 'JPG', 'jpg', 'jpeg'):
        raise InputError(description="Image is not a JPG")
    width, height = img.size
    if x_start >= x_end or y_start >= y_end:
        raise InputError(description="End coordinates must be greater than start coordinates")
    # BUG FIX: (a or b or c) < 0 only range-checks the first truthy value;
    # each coordinate must be validated individually
    if (x_start < 0 or y_start < 0 or x_end < 0 or y_end < 0
            or x_start > width or x_end > width
            or y_start > height or y_end > height):
        raise InputError(description="Crop is not within the bounds of the image")
    area = (x_start, y_start, x_end, y_end)
    img = img.crop(area)
    img.save('static/' + filename)
    u_id = get_user_from_token(token)
    img_url = url_for('static', filename=filename, _external=True)
    for user in data['users']:
        if user['u_id'] == u_id:
            user['profile_img_url'] = img_url
    return {}
def writeScaling(symmFuncts, info, pe, atomCount, seed, scalingData, nnInfoFile, runnerFolder):
    """
    Given an array of symmetry functions 'symmFuncts',
    the results of applying these symmetry functions to each element in 'info',
    the list of potential energies 'pe',
    and the number of atoms 'atomCount':

    Writes the atom information to "<runnerFolder>scaling.data",
    and identical symmetry-function/network settings to both
    "<runnerFolder>input.nn" and "runner/input.nn.RuNNer++".

    Details on these outputs can be found in "runner_input.txt" and
    "runner_input_template.txt".
    """
    defaults = {"verbose" : False}
    # Network settings come from the nn-info file, overridden by scalingData
    data = helper.combine_dicts(scalingData, helper.get_data(nnInfoFile, defaults))
    # A single hidden layer comes back as scalars; normalise to lists so the
    # join() calls below work uniformly
    if data["layers"] == 1:
        data["nodes"] = [data["nodes"]]
        data["activationfunctions"] = [data["activationfunctions"]]
    with open(runnerFolder + "input.nn", "w") as f:
        # Write Settings: start with the boilerplate template, then append
        # the run-specific key/value lines
        with open("bin/scalingBasics.txt", "r") as fi:
            for line in fi:
                f.write(line)
        f.write("number_of_elements\t1\t# number of elements\n")
        f.write("elements\tC\t# specification of elements\n")
        f.write("random_seed\t" + str(seed) + "\t# seed for initial random weight parameters and train/test splitting\n")
        f.write("cutoff_type\t2\t# type of cutoff function\n")
        f.write("global_hidden_layers_short\t" + str(data["layers"]) + "\t# number of hidden layers\n")
        f.write("global_nodes_short\t" + " ".join(map(str, data["nodes"])) + "\t# number of nodes in hidden layers\n")
        f.write("global_activation_short\t" + " ".join(map(str, data["activationfunctions"])) + " l\t# The activation function used by each layer\n")
        f.write("test_fraction\t" + str(1 - data["trainratio"]) + "\t# threshold for splitting between fitting and test set\n")
        f.write("scale_min_short\t" + str(data["scalemin"]) + "\t# minimum value for scaling\n")
        f.write("scale_max_short\t" + str(data["scalemax"]) + "\t# maximum value for scaling\n")
        f.write("\n")
        # One line per symmetry function (relies on each funct's __str__)
        for funct in symmFuncts:
            f.write(str(funct) + "\n")
    # Duplicate the settings file for the RuNNer++ tooling
    call(["cp", runnerFolder + "input.nn", "runner/input.nn.RuNNer++"])
    with open(runnerFolder + "scaling.data", "w") as f:
        # Write Scaling Information: one row per (element, function) pair
        # NOTE(review): `count` is assigned but never used
        count = 0
        for i in range(1):
            for j in range(len(symmFuncts)):
                f.write(str(i + 1) + " ")
                f.write(str(j + 1) + " ")
                f.write(symmFuncts[j].data_info())
                f.write("\n")
        # Final line: mean energy per atom and energy range per atom
        f.write(str(sum(pe)/len(pe)/atomCount) + " " + str((max(pe)-min(pe))/atomCount))
def combine_several_data(show=False):
    """Fetch BBCA.JK and BBRI.JK data for 2020-01-01..2020-02-01 as one
    frame; optionally print it before returning."""
    tickers = ["BBCA.JK", "BBRI.JK"]
    trading_dates = pd.date_range("2020-01-01", "2020-02-01")
    frame = get_data(tickers, trading_dates)
    if show:
        print(frame)
    return frame
def message_react(token, message_id, react_id):
    '''
    Given a message within a channel the authorised user is part of, add a "react" to \
    that particular message

    Parameters:
        token - The user's token that was generated from their user id
        message_id - The id of the message to be reacted to
        react_id - What type of reaction is shown

    Returns:
        An empty dictionary

    Errors:
        InputError:
            Message_id is not a valid message within a channel that the authorised user has joined
            React_id is not a valid React ID. The only valid react ID the frontend has is 1
            Message with ID message_id already contains an active React with ID react_id \
            from the authorised user
    '''
    check_token(token)
    data = get_data()
    # The only valid react ID the frontend has is 1
    if react_id != 1:
        raise InputError(description="Invalid React ID")
    if not find_id_in_list(data['messages'], message_id, 'message_id'):
        raise InputError(
            description="Message_id is not a valid message within a \
            channel that the authorised user has joined")
    # Locate the message record (ids are assumed unique in the store)
    for message in data['messages']:
        if message['message_id'] == message_id:
            msg = message
    u_id = get_user_from_token(token)
    channel_id = msg['channel_id']
    # Channels are stored positionally: id N lives at index N - 1
    curr_channel = data['channels'][channel_id - 1]
    if not find_id_in_list(curr_channel['all_members'], u_id, 'u_id'):
        raise InputError(
            description="Message_id is not a valid message within a \
            channel that the authorised user has joined")
    # check if user has already reacted
    for react in msg['react']:
        if react['u_id'] == u_id:
            raise InputError(
                description="User has already reacted to this message")
    new_react = {'react_id': react_id, 'u_id': u_id}
    for message in data['messages']:
        if message['message_id'] == message_id:
            message['react'].append(new_react)
    return {}
def message_send(token, channel_id, message):
    '''
    Send a message from authorised_user to the channel specified by channel_id

    Parameters:
        token - The user's token that was generated from their user id
        channel_id - The id the user wishes to message
        message - The message the user wishes to send

    Returns:
        A dictionary containing a message_id

    Errors:
        InputError: Message is more than 1000 characters
        AccessError: The authorised user has not joined the channel they
                     are trying to post to
    '''
    # Check if user is valid
    check_token(token)
    # Reject over-long messages before touching the store
    if len(message) > 1000:
        raise InputError(description="Message is too long")
    data = get_data()
    sender_id = get_user_from_token(token)
    # Channels are stored positionally: id N lives at index N - 1
    target_channel = data['channels'][channel_id - 1]
    # u_id 0 bypasses the membership check (presumably a system/bot sender
    # -- confirm against the rest of the project)
    if sender_id != 0 and not find_id_in_list(target_channel['all_members'], sender_id, 'u_id'):
        raise AccessError(description="User is not in channel")
    # UTC timestamp for when the message is sent
    sent_at = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()
    # New id is one past the current maximum message id
    new_id = get_max_msg_id() + 1
    # Newest messages live at the head of the list
    data['messages'].insert(0, {
        'u_id': sender_id,
        'channel_id': channel_id,
        'message_id': new_id,
        'message': message,
        'time_created': sent_at,
        'send_later': False,
        'react': [],
        'is_pinned': False,
    })
    return {'message_id': new_id}
def main():
    """Train a 5-class sentiment classifier on data/train.tsv.

    Tokenises and pads the sentences, splits train/validation 80/20, then
    trains an embedding + global-average-pooling network via the project's
    MODEL wrapper and plots the accuracy curve.
    """
    # Hyper-parameters
    input_size = 10000                      # tokenizer vocabulary size
    embedding_size = 24
    output_size = 5                         # number of sentiment classes
    learning_rate = 0.01
    oov_token = '<OOV>'
    loss = 'sparse_categorical_crossentropy'
    optimizer = Adam(learning_rate=learning_rate)
    epochs = 1
    train_val_split = 0.2
    # Load and clean the corpus
    sentences, sentiments = helper.get_data('data/train.tsv')
    sentences = helper.remove_stopwords(sentences, 'data/stopwords')
    # NOTE(review): this is the character length of the longest sentence,
    # not its token count — confirm that is what the padder expects
    max_length = len(max(sentences, key=len))
    tokenizer = helper.get_tokenizer(input_size, oov_token, sentences)
    padded_sentences = helper.convert_to_sequences(tokenizer, sentences, max_length)
    # Fixed random_state keeps the split reproducible
    train_padded_sentences, validation_padded_sentences, train_sentiments, validation_sentiments = \
        train_test_split(
            padded_sentences,
            sentiments,
            test_size=train_val_split,
            random_state=42
        )
    train_padded_sentences = np.array(train_padded_sentences)
    train_sentiments = np.array(train_sentiments)
    validation_padded_sentences = np.array(validation_padded_sentences)
    validation_sentiments = np.array(validation_sentiments)
    # Network definition (alternative layers kept from earlier experiments)
    layers = [
        tf.keras.layers.Embedding(input_size, embedding_size, input_length=max_length),
        # tf.keras.layers.LSTM(32),
        # tf.keras.layers.Conv1D(filters=64, kernel_size=5, activation='relu'),
        # tf.keras.layers.MaxPooling1D(pool_size=4),
        # tf.keras.layers.Dropout(0.2),
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(units=24, activation='relu'),
        tf.keras.layers.Dense(units=output_size, activation='softmax')
    ]
    model = MODEL(input_size, output_size, layers, loss, optimizer, epochs)
    model.__train__(train_padded_sentences, train_sentiments,
                    validation_padded_sentences, validation_sentiments)
    model.__plot_graph__('accuracy')
# !/usr/bin/env python # -*- coding: utf-8 -*- __author__ = "Roman I Pozdeev" from PyQt4 import QtGui import helper print(helper.get_data("global", "setup", "settings.ini"))
def main():
    """Pipeline entry point: optionally generate LAMMPS data, read and
    shuffle it, apply the symmetry functions, and write RuNNer train/test
    batches plus scaling.data.

    Command line: <train/test info file> <symmetry function file>
                  <neural network info file>

    NOTE: Python 2 code (raw_input, dict.has_key).
    """
    if len(sys.argv) < 4:
        print("Give the train/test, symmetry function, and neural network information files")
        return
    # Acquire and read data
    defaults = {"generateData" : True, "trainRatio" : 0.9, "runnerScaling" : "../runner/", "verbose" : False}
    data = helper.get_data(sys.argv[1], defaults)
    verbose = data["verbose"]
    runnerFolder = data["runnerfolder"]
    data_file = data["datafile"]
    # Interactively confirm before wiping the train output folder
    should_delete = raw_input("Cleaning (deleting all files) from " + data["trainfolder"] + " Type 'yes' to confirm:\n")
    if should_delete == "yes":
        # Stolen from: https://stackoverflow.com/questions/185936/delete-folder-contents-in-python
        folder = data["trainfolder"]
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                print(e)
    # Same confirmation/cleanup for the test output folder
    should_delete2 = raw_input("Cleaning (deleting all files) from " + data["testfolder"] + " Type 'yes' to confirm:\n")
    if should_delete2 == "yes":
        folder = data["testfolder"]
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                print(e)
    if data["generatedata"]:
        print("Generating lammps data")
        if not (data.has_key("lammpsfile") and (data.has_key("seedrange") or data.has_key("seed"))):
            print("Give a lammpsFile and seedRange or seed if you want to generate data")
            return
        # Seed either drawn from the configured range or taken directly
        if data.has_key("seedrange"):
            seed = random.randint(data["seedrange"][0], data["seedrange"][1])
        else:
            seed = data["seed"]
        generateData(data["lammpsfile"], seed, data_file, runnerFolder)
    print("Reading atomic data from " + data_file)
    atomData, thermoData = readAtomData(data_file)
    print("Shuffling data")
    # Shuffle atom and thermo data together so the pairing is preserved
    z = list(zip(atomData, thermoData))
    random.shuffle(z)
    atomData, thermoData = zip(*z)
    #Get Functions
    functs = getSymmFunctions(sys.argv[2])
    # Dead code kept as a string literal below (earlier inline construction
    # of the function list)
    """functs = [] # UPDATEEEEEEE if isinstance(data["2functions"], list): c_temp = ["C" for _ in range(len(data["2functions"]))] else: c_temp = "C" if data.has_key("2functions"): functs += get2Functs(data["2functions"], c_temp, c_temp, data["2Rc"], data["2eta"], data["2Rs"]) if data.has_key("3functions"): functs += get3Functs(data["3functions"], c_temp, c_temp, c_temp, data["3Rc"], data["3sigma"], data["3count"])"""
    functs.sort()
    # Write train and test files
    trainFolder = data["trainfolder"]
    testFolder = data["testfolder"]
    trainRatio = data["trainratio"]
    trainCutoff = int(trainRatio * len(atomData)) + 1
    i = 0
    print("Calculating scaling metadata for " + str(len(atomData)) + " timesteps")
    getScalingData(atomData, functs)
    print("Writing " + str(trainCutoff) + " training files and " + str(len(atomData)-trainCutoff) + " testing files")
    info = [list([[0, 0, int(1e10), int(-1e10)] for _ in range(len(functs))])]
    metaIndex = 0
    scale_min = data["scalemin"]
    scale_max = data["scalemax"]
    # NOTE(review): if generatedata is false, `seed` is never assigned and
    # the writeScaling call at the bottom will raise NameError — confirm
    # whether a configured seed should be read here too
    while i < trainCutoff:
        if verbose:
            print("Writing Training Batch: " + str(i+1))
        metaIndex = writeBatch(trainFolder + str(i+1) + ".nndata", len(atomData[i][0]), thermoData[i], functs, metaIndex, scale_min, scale_max)
        i += 1
    while i < len(atomData):
        if verbose:
            print("Writing Test Batch: " + str(i+1-trainCutoff))
        metaIndex = writeBatch(testFolder + str(i+1-trainCutoff) + ".nndata", len(atomData[i][0]), thermoData[i], functs, metaIndex, scale_min, scale_max)
        i += 1
    print("Writing function information to scaling.data")
    writeScaling(functs, info, thermoData, len(atomData[0]), seed, data, sys.argv[3], runnerFolder)
import helper
import json

# Value returned by the project helper (presumably a path to a data file —
# confirm against helper.get_data)
data_path = helper.get_data()
# NOTE(review): json.load expects a file object, not a path; the commented
# call below would need open(data_path) to work
#test = json.load(data_path)