async def beanbtn(conv, SENDER, msg):
    """Inline-button handler for the bean script: pick a container, show its data."""
    from lib import get_data, show_data
    try:
        # One button per container, plus a cancel button, laid out three per row.
        markup = [Button.inline(cntr, data=cntr) for cntr in containers]
        markup.append(Button.inline('Cancel', data='cancel'))
        markup = split_list(markup, 3)
        msg = await client.edit_message(msg, 'Please select a container:', buttons=markup)
        event = await conv.wait_event(press_event(SENDER))
        res = event.data.decode()
        if res == 'cancel':
            msg = await client.edit_message(msg, 'Conversation cancelled')
            conv.cancel()
            return None, None
        else:
            # Assumes `containers` maps the button label to whatever get_data expects.
            text = show_data(get_data(containers[res]))
            # msg = await conv.send_message(text)
            msg = await client.edit_message(msg, text)
            conv.cancel()
            return None, None
    except exceptions.TimeoutError:
        msg = await client.edit_message(msg, 'Selection timed out, conversation stopped')
        return None, None
    except Exception as e:
        msg = await client.edit_message(
            msg, "Something went wrong, I'm sorry\n" + str(e))
        logger.error("Something went wrong, I'm sorry\n" + str(e))
        return None, None
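# The handler above leans on two helpers defined elsewhere in the module.
# A minimal sketch of plausible implementations, assuming the usual Telethon
# pattern; the names and signatures are inferred from the call sites, not
# taken from the source:
from telethon import events


def split_list(items, n):
    # Chunk a flat list of buttons into rows of at most n entries, the
    # nested-list shape Telethon expects for an inline-keyboard layout.
    return [items[i:i + n] for i in range(0, len(items), n)]


def press_event(user_id):
    # Build a CallbackQuery filter that only matches button presses coming
    # from the user this conversation belongs to.
    return events.CallbackQuery(func=lambda e: e.sender_id == user_id)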
def main(data_name):
    if data_name == "fer2013":
        name = "fer2013"
        data_file_path = "../../../data/fer2013/fer2013.csv"
    else:
        name = "icv_mefed"
        train_path = "../../../data/icv_mefed/training/"
        test_path = "../../../data/icv_mefed/testing/"

    # Obtain the data from the path provided
    # data = get_data(name=name, data_file_path=data_file_path)

    # Obtain the data from the paths provided. Note: only the icv_mefed branch
    # sets train_path/test_path; the fer2013 branch still depends on the
    # commented-out loader above.
    train_data = get_data(name=name, data_file_path=train_path + 'training.txt')
    training_data, training_labels = make_sets(train_data, train_path,
                                               extract_landmarks=False)
    test_data = get_data(name=name, data_file_path=test_path + 'testing.txt')
    testing_data, testing_labels = make_sets(test_data, test_path,
                                             extract_landmarks=False)

    # Generate training and test sets from the data
    # X_train, Y_train, X_test, Y_test = generate_data_split(data=data, num_of_classes=7, name=name)

    # Turn the sets into numpy arrays for the classifier
    X_train = np.array(training_data)
    Y_train = np.array(training_labels)
    X_test = np.array(testing_data)
    Y_test = np.array(testing_labels)

    # Pre-process the image data
    X_train, X_test = normalize_data(X_train, X_test)

    # Generate or load trained model
    model = generate_model(X_train, Y_train)

    # Evaluate model
    evaluate_model(model, X_train, Y_train, X_test, Y_test)

    # Save model to disk
    save_model(name="cnn", model=model)
def main(data_name):
    # clf = SVC(kernel='linear', probability=True, tol=1e-3)  # , verbose=True
    # Set the classifier as a support vector machine with a polynomial kernel
    if data_name == "fer2013":
        name = "fer2013"
        data_file_path = "../../../data/fer2013/fer2013.csv"
    else:
        name = "icv_mefed"
        train_path = '../../../data/icv_mefed/training/'
        test_path = '../../../data/icv_mefed/testing/'

    # Obtain the data from the paths provided
    train_data = get_data(name=name, data_file_path=train_path + 'training.txt')
    training_data, training_labels = make_sets(train_data, train_path)
    test_data = get_data(name=name, data_file_path=test_path + 'testing.txt')
    testing_data, testing_labels = make_sets(test_data, test_path)

    # Turn the sets into numpy arrays for the classifier
    X_train = np.array(training_data)
    Y_train = np.array(training_labels)
    X_test = np.array(testing_data)
    Y_test = np.array(testing_labels)

    # Pre-process the image data
    X_train, X_test = normalize_data(X_train, X_test)

    # Generate or load trained model
    model = generate_model(X_train, Y_train)

    # Evaluate model
    evaluate_model(model, X_train, Y_train, X_test, Y_test)

    # Save model to disk
    save_model(name="svm", model=model)
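# Both training entry points above call normalize_data before fitting, but its
# body is not shown here. A minimal sketch of what such a helper commonly does,
# assuming 8-bit grayscale pixel inputs (the 255 scaling constant is an
# assumption, not taken from the source):
import numpy as np


def normalize_data(X_train, X_test):
    # Cast to float and scale pixel intensities into [0, 1] so both splits
    # share a single value range.
    return X_train.astype("float32") / 255.0, X_test.astype("float32") / 255.0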
def __init__(self):
    self.loans = get_data()
    # Normalise loan titles: strip hyphens and digits, trim, lowercase.
    self.loans["Unique title"] = [
        "".join([i for i in title if i != "-" and not i.isdigit()]).rstrip().lower()
        for title in self.loans["Loan title"]
    ]
    # Collapse "<name> loan" into "<name>" when the bare name also occurs.
    self.loans["Unique title"] = [
        a[:-5]
        if (a[-5:] == " loan" and a[:-5] in self.loans["Unique title"].values)
        else a
        for a in self.loans["Unique title"]
    ]
    self.live_loans = self.loans[self.loans["Loan status"].isin(
        ["Live", "Late", "Processing"])]
    self.i = 0
    self.total_remaining_principal = self.live_loans[
        "Principal remaining"].sum()
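# A worked example of the two title-normalisation passes above, on invented
# sample titles (the strings are hypothetical, for illustration only):
import pandas as pd

titles = ["Bridging Loan - 0042", "Bridging", "Development Loan - 7"]

# Pass 1: drop hyphens and digits, trim trailing spaces, lowercase.
unique = pd.Series(
    ["".join(c for c in t if c != "-" and not c.isdigit()).rstrip().lower()
     for t in titles])
print(unique.tolist())  # ['bridging loan', 'bridging', 'development loan']

# Pass 2: "bridging loan" collapses to "bridging" because the bare name also
# occurs; "development loan" survives, since there is no bare "development".
unique = pd.Series(
    [a[:-5] if (a[-5:] == " loan" and a[:-5] in unique.values) else a
     for a in unique])
print(unique.tolist())  # ['bridging', 'bridging', 'development loan']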
else:
    d1 = date(2011, 7, 28)
    d2 = d1 + td(days=8)

# The branch values above are overridden with a hardcoded single-day range.
d1 = date(2014, 3, 28)
d2 = date(2014, 3, 28)
delta = d2 - d1

for i in range(delta.days + 1):
    my_date = d1 + td(days=i)
    fecha = my_date.strftime("%d/%m/%Y")
    print(fecha)
    buscar(fecha)

filename = os.path.join(config.base_folder, "visitas.db")
db = dataset.connect("sqlite:///" + filename)
table = db['visitas']

print("Getting data from our json file")
items = lib.get_data()

print("Uploading data from our json file")
for i in items:
    if not table.find_one(sha1=i['sha1']):
        print(i['sha1'], i['date'])
        table.insert(i)

print("Recreating website")
lib.recreate_website()
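# The find_one-then-insert loop above can be written more compactly with
# dataset's upsert, which matches rows on the given key columns. Note the
# behavioural difference: upsert updates an existing row, where the original
# loop silently skips it. A sketch, assuming the same `items` and `table`:
for i in items:
    table.upsert(i, ['sha1'])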
#!/usr/bin/env python3
from datetime import datetime

import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters

from lib import get_data

if __name__ == "__main__":
    data = get_data()
    data["due_date"] = [datetime.strptime(a, "%Y-%m-%d") for a in data["due_date"]]
    data = data.sort_values(by=["due_date", "loan_part_id"])

    # Profit is interest net of fees; income adds repaid principal on top.
    data["profit"] = data["pay_interest"] - data["lender_fee"]
    data["income"] = data["pay_principal"] + data["profit"]
    data["profit_cumulative"] = data["profit"].cumsum()
    data["income_cumulative"] = data["income"].cumsum()

    register_matplotlib_converters()
    plt.plot(data["due_date"], data["income_cumulative"], label="Total")
    plt.plot(data["due_date"], data["profit_cumulative"], label="Interest")
    plt.xlabel("Date")
    plt.ylabel("Income / £")
    plt.legend()
    plt.title("Cumulative income forecast")
    plt.show()
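# Design note: the strptime list comprehension works, but pandas can parse the
# whole column in one vectorized call. A self-contained sketch of the
# equivalent (using a toy frame, since get_data's output isn't shown):
import pandas as pd

df = pd.DataFrame({"due_date": ["2020-01-31", "2020-02-29"]})
df["due_date"] = pd.to_datetime(df["due_date"], format="%Y-%m-%d")
print(df["due_date"].dtype)  # datetime64[ns]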