def create_reservoir():
    """Interactively build an RBN reservoir node.

    Prompts (with defaults) for connectivity, node count, and input
    connectivity, then returns the constructed ``rbn_node.RBNNode``.
    """
    k = default_input('connectivity', 2)
    size = default_input('n_nodes', 100)
    input_k = default_input('input_connectivity', 50)
    return rbn_node.RBNNode(
        connectivity=k,
        output_dim=size,
        input_connectivity=input_k)
def create_dataset():
    """Prompt for dataset parameters and build temporal task datasets.

    Returns a tuple of (datasets, description) where the description is
    a '[type-count-length-window]' tag suitable for file naming.
    """
    kind = default_input('Dataset [temporal_parity, temporal_density]',
                         'temporal_parity')
    count = default_input('Datasets', 10)
    length = default_input('Dataset length', 200)
    window = default_input('Window size', 3)

    datasets = temporal.create_datasets(
        count,
        task_size=length,
        window_size=window,
        dataset_type=kind)

    description = '[{}-{}-{}-{}]'.format(kind, count, length, window)
    return datasets, description
def create_dataset():
    """Interactively gather parameters and create temporal datasets.

    Returns (datasets, dataset_description); the description encodes the
    chosen parameters as '[type-count-length-window]'.
    """
    params = {
        'dataset_type': default_input(
            'Dataset [temporal_parity, temporal_density]', 'temporal_parity'),
        'n_datasets': default_input('Datasets', 10),
        'task_size': default_input('Dataset length', 200),
        'window_size': default_input('Window size', 3),
    }

    datasets = temporal.create_datasets(
        params['n_datasets'],
        task_size=params['task_size'],
        window_size=params['window_size'],
        dataset_type=params['dataset_type'])

    dataset_description = '[{}-{}-{}-{}]'.format(
        params['dataset_type'], params['n_datasets'],
        params['task_size'], params['window_size'])
    return datasets, dataset_description
def erb():
    """Estimate the fitness distribution of sampled RBN reservoirs.

    Prompts for reservoir/task parameters, estimates the distribution over
    a user-chosen range of values, and pickles the result into the
    working directory under a parameter-encoded name.
    """
    working_dir = get_working_dir()
    log.setup(logging.DEBUG, path=working_dir)

    window_size = default_input('Window size', 3)
    n_nodes = default_input('N Nodes', 100)
    connectivity = default_input('Connectivity', 2)
    f = default_input('From', 0)
    t = default_input('To', n_nodes + 1)
    # BUG FIX: `n_nodes / 10` is a float on Python 3 and range() rejects
    # non-integer steps; also guard against a zero step for small n_nodes.
    s = default_input('Step', max(1, n_nodes // 10))
    r = range(f, t, s)

    distribution = estimate_reservoir_distribution(
        30, n_nodes, connectivity, r, window_size)

    name = '[NN:{}-WS:{}-K:{}]-distribution'.format(n_nodes, window_size,
                                                    connectivity)
    dump(distribution, name, folder=working_dir)
def erb():
    """Estimate the fitness distribution of sampled RBN reservoirs.

    Prompts for reservoir/task parameters, estimates the distribution over
    a user-chosen range of values, and pickles the result into the
    working directory under a parameter-encoded name.
    """
    working_dir = get_working_dir()
    log.setup(logging.DEBUG, path=working_dir)

    window_size = default_input('Window size', 3)
    n_nodes = default_input('N Nodes', 100)
    connectivity = default_input('Connectivity', 2)
    f = default_input('From', 0)
    t = default_input('To', n_nodes + 1)
    # BUG FIX: `n_nodes / 10` is a float on Python 3 and range() rejects
    # non-integer steps; also guard against a zero step for small n_nodes.
    s = default_input('Step', max(1, n_nodes // 10))
    r = range(f, t, s)

    distribution = estimate_reservoir_distribution(30, n_nodes, connectivity,
                                                   r, window_size)

    name = '[NN:{}-WS:{}-K:{}]-distribution'.format(n_nodes, window_size,
                                                    connectivity)
    dump(distribution, name, folder=working_dir)
# Optionally dump newly created flow if not user_denies('Pickle reservoir and readout layer?'): flow_description = '{}-{}-[ACC:{}]'.format( dataset_description, rbn_reservoir.describe(), accuracy) dump(rbn_reservoir, flow_description + '-reservoir', folder=working_dir) dump(readout, flow_description + '-readout', folder=working_dir) # Evolve other reservoirs with similar dynamics if not user_denies('Use readout layer to evolve similar rbn_reservoirs?'): n_nodes = readout.input_dim connectivity = default_input('Connectivity', 2) n_runs = default_input('How many GA runs?', 1) for i in range(n_runs): reservoir_problem = RBNReservoirProblem( n_nodes, connectivity, readout, test_dataset) generation, adults = solve(reservoir_problem, path=working_dir) fitnesses = [x.fitness for x in adults] top3 = fitnesses[-3:] top3.reverse() mean = np.mean(fitnesses) std = np.std(fitnesses) description = (
logging.info("Accuracy: {} on {} items.".format( accuracy, len(reservoir_input))) # Optionally dump newly created flow if not user_denies('Pickle reservoir and readout layer?'): flow_description = '{}-{}-[ACC:{}]'.format( dataset_description, rbn_reservoir.describe(), accuracy) dump(rbn_reservoir, flow_description + '-reservoir', folder=working_dir) dump(readout, flow_description + '-readout', folder=working_dir) # Evolve other reservoirs with similar dynamics if not user_denies('Use readout layer to evolve similar rbn_reservoirs?'): n_nodes = readout.input_dim connectivity = default_input('Connectivity', 2) n_runs = default_input('How many GA runs?', 1) for i in range(n_runs): reservoir_problem = RBNReservoirProblem(n_nodes, connectivity, readout, test_dataset) generation, adults = solve(reservoir_problem, path=working_dir) fitnesses = [x.fitness for x in adults] top3 = fitnesses[-3:] top3.reverse() mean = np.mean(fitnesses) std = np.std(fitnesses) description = (
def update_expense(expense):
    """Interactively re-categorize a Splitwise expense and push the update.

    Skips deleted expenses and payments untouched. Prompts the user for
    upgrade flag, category, and store — seeded from the expense's existing
    JSON details when present — then writes the rebuilt details back via
    the Splitwise API.

    Returns the (unchanged) expense when skipped, otherwise the result of
    ``Splitwise.updateExpense``.
    """
    # Guard clauses: never touch deleted expenses or payment records.
    if expense.deleted_at:
        return expense
    if expense.payment:
        return expense

    # Optional config flags; getattr with a default replaces the broad
    # try/except Exception blocks that only existed to tolerate a missing
    # attribute.
    ignore_old_details = getattr(config, 'ignore_old_remarks', False)
    skip_completed = getattr(config, 'skip_completed', False)

    s = splitwise.Splitwise(config.consumer_key, config.consumer_secret,
                            api_key=config.API_key)

    print(
        '---------------------------------------------------------------------------------------'
    )
    print_formatted_text(
        HTML('<ansigreen>' + _('Description: ') + '</ansigreen>' +
             get_display(expense.description) + '(' +
             utils.to_simple_local_date_string(expense.date) + ')'))

    if utils.is_json(expense.details):
        # Details already in our JSON format: considered "completed".
        if skip_completed:
            return expense
        det = json.loads(expense.details)
        # NOTE(review): assumes the details JSON always carries
        # 'is_boat_upgrade', 'category', 'store' and 'remarks' keys —
        # partial payloads would raise KeyError here; confirm with the
        # writer side (create_details_json).
        upgrade = 'Y' if det['is_boat_upgrade'] else 'N'
        ug = utils.default_input(_('Is considered upgrade?') + '(' + upgrade +
                                 ')',
                                 upgrade, ['Y', 'N'],
                                 ignore_case=True)
        category = utils.default_input(_('Expense category?'),
                                       det['category'],
                                       categories,
                                       ignore_case=True)
        store = utils.session_input(_('Store?'), session, det['store'])
        remarks = det['remarks']
    else:
        # Raw (non-JSON) details: show them and prompt with generic defaults.
        print_formatted_text(
            HTML('<ansigreen>' + _('Details: ') + '</ansigreen>' +
                 get_display(str(expense.details))))
        ug = utils.default_input(_('Is considered upgrade?') + '(Y/N)',
                                 'N', ['Y', 'N'],
                                 ignore_case=True)
        category = utils.default_input(_('Expense category?'),
                                       'Other',
                                       options=categories,
                                       ignore_case=True)
        store = utils.session_input(_('Store?'), session, '')
        # Preserve the original free-text details as remarks unless the
        # user configured them to be dropped.
        remarks = "" if ignore_old_details else expense.details

    ug = utils.convert_yn_to_bool(ug)
    new_details = create_details_json(remarks, category, store, ug)

    # Push only id + details back; the API merges onto the existing expense.
    e = Expense()
    e.id = expense.id
    e.setDetails(new_details)
    return s.updateExpense(e)