Example #1
    def final_save(self, model_name, save_dir):
        """
        Print and save the best results

        Args:
            model_name: prefix used for the saved .npy result files
            save_dir: directory for saving results
        """

        if (self.save_precision is not None) and (self.save_recall
                                                  is not None):
            tp.banner('These are the best results!')
            mean = (self.save_precision[100] + self.save_precision[200] +
                    self.save_precision[300]) / 3
            data = [[
                self.save_precision[100], self.save_precision[200],
                self.save_precision[300], mean, self.best_auc, self.f1_score
            ]]
            headers = ['P@100', 'P@200', 'P@300', 'Mean', 'AUC', 'Max F1']
            tp.table(data, headers)

            ensure_folder(save_dir)
            np.save(os.path.join(save_dir, '{}_recall.npy'.format(model_name)),
                    self.save_recall[:2000])
            np.save(
                os.path.join(save_dir, '{}_precision.npy'.format(model_name)),
                self.save_precision[:2000])
        else:
            logger.error('No model result to save')
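
ensure_folder is imported from elsewhere; a minimal sketch of what it presumably does (create the directory if it is missing), offered as an assumption rather than the project's actual helper:

import os

def ensure_folder(folder):
    # Hypothetical sketch: create the directory tree if it does not exist.
    os.makedirs(folder, exist_ok=True)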
Example #2
def __printTable__(title, data, headers):
    tp.banner(title)
    columns_max_width = __calculateColumnsMaxWidth__(headers, data)
    tp.table(data=data,
             headers=headers,
             width=columns_max_width,
             style='fancy_grid')
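
__calculateColumnsMaxWidth__ is defined elsewhere; a minimal sketch, assuming it returns one width per column sized to the longest cell or header (tp.table accepts a per-column width sequence):

def __calculateColumnsMaxWidth__(headers, data):
    # Hypothetical sketch: widest string per column, headers included.
    widths = [len(str(h)) for h in headers]
    for row in data:
        for i, cell in enumerate(row):
            widths[i] = max(widths[i], len(str(cell)))
    return widths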
Example #3
from io import StringIO

from tableprint import banner


def test_banner():
    """Tests the banner function"""
    output = StringIO()
    banner('hello world', style='clean', width=11, out=output)
    assert output.getvalue() == ' ─────────── \n hello world \n ─────────── \n'

    output = StringIO()
    banner('!', style='banner', width=1, out=output)
    assert output.getvalue() == '╒═╕\n│!│\n╘═╛\n'
Example #4
def main(action, search_query, ids, start, max_result, sort_by, sort_order, v,
         output):
    """A cli package for accessing and quering arxiv papers."""
    if v: click.echo("Building query parameters \n")
    kwargs = {
        "start": start,
        "max_result": max_result,
        "sort_by": sort_by,
        "sort_order": sort_order,
        "search_query": search_query,
        "ids": ids
    }
    print(kwargs)
    if v: click.echo("Parameters built \n")
    if action == "query":
        arxiv = Arxiv(**kwargs)
        text = """The provided query parameters are {0}  .\nThe request url is {1} \n""".format(
            arxiv.query_url_args, arxiv.request_url)
        if v: click.echo(text)
        query_result = arxiv.query()
        if v:
            click.echo("Query Complete  \n{} result \n".format(
                "Printing raw as list" if output == "list" else
                "Printing raw as json" if output == "json" else "Tabulating"))
        if output == "list":
            print(query_result)
        elif output == "json":
            import json
            output = json.dumps(query_result)
            print(output)
        else:
            click.echo(
                "Tabulating is currently not supported. It should be by tomorrow. Rolling back to pprint"
            )
            pprint.pprint(query_result)

    if action == "download":
        arxiv = Arxiv(**kwargs)
        text = """The provided query parameters are {0}  .\nThe request url is {1} \n""".format(
            arxiv.query_url_args, arxiv.request_url)
        if v: click.echo(text)
        arxiv_query = arxiv.query()[0]
        if v: click.echo("Query Complete\n")
        if v: click.echo("Downloading Paper\n")
        arxiv.download(arxiv_query)
        tableprint.banner("Successfully Downloaded Paper : {0}".format(
            arxiv_query['title']))

    if action == "get_paper":
        arxiv = Arxiv(**kwargs)
        arxiv_query = arxiv.query()[0]
        pprint.pprint(arxiv_query)
Example #5
def ANTCOL(G, ncycles, nants, alpha, beta, rho, k):
    """
    Main procedure for the metaheuristic described in the paper.

    :param G: networkx.Graph
    :param ncycles: Total number of cycles to run.
    :param nants: Number of ants to use.
    :param alpha: Metaparameter used when computing the probability of
                  choosing a vertex to colour.
    :param beta: Metaparameter used when computing the same probability.
    :param rho: Metaparameter for trail evaporation.
    :param k: Number of partitions in the graph being worked on.
              Potentially useful for optimizations.
    :return: The list of colour classes obtained for the colouring of G.
    :rtype: [ColorClass].
    """
    tp.banner("Lista de Vértices V: ")
    V = list(G)
    print(V)
    tp.banner("Lista de aristas E: ")
    E = list(G.edges)
    print(E)
    t = initialise_trail_matrix(V)  # Initialize the trail matrix.
    list_color_classes = []
    for cycle in range(1, ncycles + 1):
        print("> ciclo:", cycle)
        delta = initialise_trail_update_matrix(
            t)  # Initialize the trail update matrix.

        for ant in range(1, nants + 1):
            print("\t-- hormiga:", ant)
            X = list(V)  # Copy the list of uncoloured vertices (a plain alias would let the ants empty V).
            k = 0  # Initialize the number of colours used (note: shadows the parameter k).
            while X:
                k = k + 1
                C_k = ColorClass(k)  # Initialize colour class k.
                list_color_classes.append(C_k)

                F = list(X)  # Copy the list of vertices still feasible for colour k (so X is not mutated through F).
                i = select_with_probability(
                    F, 1 / len(F))  # Select i ∈ F with probability 1/|F|.
                COLOUR_VERTEX(G, i, k, list_color_classes, F, X)
                while F:

                    i = select_pik(G, list_color_classes, alpha, beta, t, F)
                    COLOUR_VERTEX(G, i, k, list_color_classes, F, X)

            update_trail_update_matrix(
                G, delta, k)  # Update the trail update matrix.
        update_trail_matrix(G, t, delta, rho)  # Update the trail matrix.

    return list_color_classes  # Return the colour classes.
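
initialise_trail_matrix and initialise_trail_update_matrix are defined elsewhere; a minimal sketch of the first, assuming the usual ACO choice of a uniform initial pheromone level:

def initialise_trail_matrix(V):
    # Hypothetical sketch: |V| x |V| matrix with every trail set to 1.
    n = len(V)
    return [[1.0] * n for _ in range(n)]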
Example #6
def main():

    check_schema_migration()
    tableprint.banner("Easier Life! Easier Work!")
    context = Context()

    while True:
        try:
            process_input(context)
        except KeyboardInterrupt:
            continue
        except EOFError:
            break
    print('Goodbye!')
Example #7
def disp_results(results, imp=False):
    results, impurities = results[0], results[1]
    yx = []
    for molecule in results.molecules:
        yx.append((molecule.name, molecule.mass, molecule.charge, molecule.probability))
    tp.banner('Combination results')
    tp.table(yx, ['Combination', 'DA', 'Charge', 'Probability'], style='fancy_grid', width=25)

    if imp:
        yx = []
        for molecule in impurities.molecules:
            yx.append((molecule.name, molecule.mass, molecule.charge, molecule.probability))
        tp.banner('Impurities')
        tp.table(yx, ['Combination', 'DA', 'Charge', 'Relative Probability'], style='fancy_grid', width=25)
Example #8
def fit_ln(expt, ci, stim, activation, l2_reg=0.1):

    if activation.lower() == 'rbf':
        model_args = (30, 6)
    else:
        model_args = ()

    model = functools.partial(linear_nonlinear,
                              activation=activation,
                              l2_reg=l2_reg)
    tp.banner(
        f'Training LN-{activation}, expt {expt}, {stim}, cell {ci+1:02d}'
    )
    train(model,
          expt,
          stim,
          model_args=model_args,
          lr=1e-2,
          nb_epochs=500,
          val_split=0.05,
          cells=[ci])
Example #9
    def write_to_screen(self):
        """
        The key method of this class: prints the data frame in a readable
        format that scales to the terminal size available to the user.
        """
        screen_width, widths, modified_data_frame = self.fit_screen()

        tp.banner(
            self.config.banner,
            width=screen_width,
        )

        if self.data_frame.empty:
            tp.banner(
                self.config.empty_banner,
                width=screen_width,
            )
            return

        tp.dataframe(modified_data_frame, width=widths)
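
fit_screen is not shown; one plausible sketch, assuming it measures the terminal and splits the width evenly across columns:

    def fit_screen(self):
        # Hypothetical sketch (the real method may also truncate rows or columns).
        import shutil
        screen_width = shutil.get_terminal_size().columns
        n_cols = max(len(self.data_frame.columns), 1)
        widths = [max(screen_width // n_cols - 3, 5)] * n_cols
        return screen_width, widths, self.data_frame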
Example #10
def disp_select(results, DA):
    results, impurities = results[0], results[1]
    yx = []
    total = 0
    for molecule in results.molecules:
        if molecule.mass == DA:
            yx.append([molecule.name, molecule.charge, molecule.probability])
            total += molecule.probability
    for molecule in yx:
        molecule.append(molecule[-1] / total * 100)
    tp.banner('DA Probability Results for %s' % DA)
    tp.table(yx, ['Combination', 'Charge', 'Overall Probability', 'DA Probability (%)'], style='fancy_grid', width=25)
    yx = []
    total = 0  # reset before accumulating the impurity probabilities
    for molecule in impurities.molecules:
        if molecule.mass == DA:
            yx.append([molecule.name, molecule.charge, molecule.probability])
            total += molecule.probability
    for molecule in yx:
        molecule.append(molecule[-1] / total * 100)
    tp.banner('Impurities at %s' % DA)
    tp.table(yx, ['Combination', 'Charge', 'Relative Probability', 'DA Probability (%)'], style='fancy_grid', width=25)
Example #11
    def message(self, msg):
        if str(msg['type']) == 'chat':
            if len(msg['body']) > 3000:
                tp.banner('<-------IMAGEN RECIBIDA------->')
                received = msg['body'].encode('utf-8')
                received = base64.decodebytes(received)
                # save the image to disk
                with open("images/imagen.jpg", "wb") as file_path:
                    file_path.write(received)
                # open the image in another window
                with Image.open('images/imagen.jpg') as img:
                    img.show()
                print('Siga escogiendo una opcion: ')
            else:
                tp.banner('<-------NUEVO MENSAJE PRIVADO RECIBIDO------->')
                print('De: %s' % str(msg['from']).split('@')[0])
                print('Mensaje: %s ' % msg['body'])
                print('Siga escogiendo una opcion:')

        # group message
        elif str(msg['type']) == 'groupchat':
            tp.banner('<-------MENSAJE EN LA SALA %s------->' % str(msg['from']).split('@')[0])
            print('De: %s' % str(msg['from']).split('@')[0])
            print('Mensaje: %s ' % msg['body'])
            print('Siga escogiendo una opcion:')
Example #12
    def summary(self):
        # Print the sampling rules for the whole sample
        headers = ['Parameter', 'Value']
        content = [['unit', self.get_unit()],
                   ['tmin', self.get_tmin()],
                   ['tmax', self.get_tmax()],
                   ['n_splits', self.get_n_splits()],
                   ['test_size', self.get_test_size()],
                   ['class_balance', str(bool(self.get_class_balance()))],
                   ['data_balance', str(bool(self.get_data_balance()))],
                   ['epoch_padding', str(bool(self.epoch_padding))],
                   ['data_padding', str(bool(self.data_padding))],
                   ['max_len', self.max_len if self.max_len is not None else 'Not Set'],
                   ['x', str(self.get_x())],
                   ['y', str(self.get_y())],
                   ['Sample Mode', self.mode if hasattr(self, 'mode') else 'Not Set']]
        tp.banner('Your Sampling Settings')
        tp.table(content, headers)
Example #13
def update_trail_matrix(G, t, delta, rho):
    """
    Update the trail matrix using the update matrix and the evaporation rate.

    :param G: networkx.Graph
    :param t: Trail matrix.
    :param delta: Trail-matrix update matrix.
    :param rho: Metaparameter for trail (pheromone) evaporation.
    """
    for i in range(len(G.nodes)):
        for j in range(len(G.nodes)):
            if i != j:
                t[i][j] = (rho * t[i][j]) + delta[i][j]


if __name__ == '__main__':

    # Start: ask the user for the maximum number of vertices.
    tp.banner("Algoritmo ANTCOL (Algoritmo ACO de Dowsland y Thompson).")
    num_vertices = int(input("Máximo número de vértices (recomiendo 30): "))

    # Build the random k-partite graph and draw it.
    print(
        "> Contruyendo gráfica aleatoria k-partita que sabemos es k-coloreable..."
    )
    G, k = create_k_partite(num_vertices)
    print(
        "\n> Mostrando la gráfica generada (cerrar el plot para continuar)...")
    plt.figure()
    title = "Gráfica G original, " + str(k) + "-partita"
    plt.title(title, color='blue')
    nx.draw(G,
            node_color='black',
            with_labels=True,
Example #14
          expt,
          stim,
          model_args=model_args,
          lr=1e-2,
          nb_epochs=500,
          val_split=0.05,
          cells=[ci])


if __name__ == '__main__':
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    # expts = ('15-10-07', '15-11-21a', '15-11-21b')
    # stims = ('whitenoise', 'naturalscene')

    parser = argparse.ArgumentParser(description='Train a BN_CNN model')
    parser.add_argument('--expt', help='Experiment date (e.g. 15-10-07)')
    parser.add_argument('--stim', help='Stimulus class (e.g. naturalscene)')
    parser.add_argument('--model',
                        help='Model architecture (e.g. BN_CNN or LN_softplus)')
    parser.add_argument('--cell', help='Cell index (only for LN models)')
    args = parser.parse_args()

    if args.model.upper() == 'BN_CNN':
        tp.banner(f'Training BN_CNN, expt {args.expt}, {args.stim}')
        fit_bn_cnn(args.expt, args.stim)

    elif args.model.split('_')[0].upper() == 'LN':
        activation = args.model.split('_')[1]
        fit_ln(args.expt, int(args.cell), args.stim, activation)
Example #15
    #print('modelo_simulacion\Scripts\activate.bat')
    # variables used to model the simulation in plots
    wait_time = []
    wait_time_attend = []
    cliente_served = []

    enviroment = simpy.Environment()
    number_cajas = input('Ingrese el numero de las cajas: ')
    number_cajas = int(number_cajas)
    cajas = [
        Cajero(enviroment, number) for number in range(1, number_cajas + 1)
    ]
    enviroment.process(Cliente.setup(enviroment, cajas))
    enviroment.run(until=500 * 2)

    # iterate over the checkout lanes
    for caja in cajas:
        cliente_served.append(caja.client_served)
        number = caja.numeros_cajero
        tp.banner(emoji.emojize(':money_bag: CAJA %d :money_bag:' % number))
        # bug: with more cashiers, the average queue time is not computed correctly
        print('El tiempo promedio de un cliente en la cola: %d segundos' %
              (np.mean(caja.timeout)))
        print('Total de Clientes Atendidos: %d' % caja.client_served)
        print('Número de clientes en la cola: %d' % caja.cola)
        total = np.sum(cliente_served)
        grado = (caja.client_served / total) * 100
        print('Grado de utilización de cada cajero %f:' % grado)

    print(emoji.emojize('Python is :money_bag:'))
Example #16
    def new_user_add(self, presence):
        tp.banner('TE HA AGREGADO EL USUARIO: %s' % str(presence['from']))
Example #17
    def on_epoch_begin(self, epoch, logs=None):
        tp.banner(f"Epoch {epoch}")
        print(tp.header(['iter', 'loss']))
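
Only the epoch header is printed here; a hypothetical companion on_batch_end (assuming the standard Keras callback interface) could stream one tp.row per batch beneath that header:

    def on_batch_end(self, batch, logs=None):
        # Hypothetical sketch: one framed row per batch under the header.
        logs = logs or {}
        print(tp.row([batch, logs.get('loss', float('nan'))]))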
Example #18

def action_list(b_list):
    action = ['']
    for i in range(len(b_list) - 1):
        if b_list[i] == b_list[i + 1]:
            action.append('')
        else:
            if b_list[i + 1]:
                action.append('buy')
            else:
                action.append('sell')
    return action


tableprint.banner('Exponential Moving Average Cross Over Strategy')

PRICE_LIST = list_price(END_TIME)
SHORT_EMA = ema_list(PRICE_LIST, SHORT)
LONG_EMA = ema_list(PRICE_LIST, LONG)
ACTION_LIST = action_list(action(PRICE_LIST, SHORT, LONG))

# Dictionaries

ACCOUNT_A1 = {'USD': 1000.00, 'BTC': 0.0}
BOOLEAN_ACTION = {True: 'buy', False: 'sell'}
BOOLEAN_CURRENCY = {True: 'BTC', False: 'USD'}
BOOLEAN_PRICE = {True: 1, False: -1}

# Account Currency Currency Float Float -> Account
# For a given account, currency to be sold, currency to be bought,
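
ema_list is defined elsewhere in the script; a minimal sketch of an exponential moving average, assuming the conventional smoothing factor 2 / (N + 1) and seeding with the first price:

def ema_list(prices, n):
    # Hypothetical sketch (the helper is not shown in the snippet).
    alpha = 2 / (n + 1)
    ema = [prices[0]]
    for price in prices[1:]:
        ema.append(alpha * price + (1 - alpha) * ema[-1])
    return ema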
Example #19
    def fit(self, num_alt=2, max_iter=20, num_likelihood_steps=50, disp=2, check_grad=None, callback=None):
        """
        Runs an optimization algorithm to learn the parameters of the model given training data and regularizers

        Parameters
        ----------
        num_alt : int, optional
            The number of times to alternate between optimizing nonlinearities and optimizing filters. Default: 2

        max_iter : int, optional
            The maximum number of steps to take during each leg of the alternating minimization. Default: 20

        num_likelihood_steps : int, optional
            The number of steps to take when optimizing the data likelihood term (using SFO)

        disp : int, optional
            How much information to display during optimization. (Default: 2)

        check_grad : string, optional
            If 'f' or 'W', then the gradient of the log-likelihood objective with respect to that parameter is checked
            against a numerical estimate.

        callback : function
            A callback function that gets called each iteration with the current parameters and a dictionary of other information

        Notes
        -----
        See the `proxalgs` module for more information on the optimization algorithm
        """
        # grab the initial parameters
        theta_current = {
            'W': self.theta_init['W'].copy(),
            'f': self.theta_init['f'].copy()
        }

        # get list of training data
        train_data = [self.data[idx] for idx in self.indices['train']]

        # data generator
        def datagen():
            while True:
                yield np.random.choice(train_data, 1)[0]

        # store train/test results during optimization
        self.convergence = defaultdict(list)

        def update_results():
            if disp > 0:
                tmp_results = self.print_test_results(theta_current)
                for k in ('train', 'test'):
                    self.convergence[k].append(tmp_results[k])

        # runs the optimization procedure for one set of parameters (a single
        # leg of the alternating minimization)
        def optimize_param(f_df_wrapper, param_key, check_grad, cur_iter):

            # initialize the SFO instance
            loglikelihood_optimizer = SFO(
                f_df_wrapper,
                theta_current[param_key],
                train_data,
                display=0)

            # check gradient
            if check_grad == param_key:
                loglikelihood_optimizer.check_grad()

            # initialize the optimizer object
            opt = Optimizer('sfo', optimizer=loglikelihood_optimizer, num_steps=num_likelihood_steps)

            # add regularization terms
            for reg in self.regularizers[param_key]:
                opt.add_regularizer(reg)

            # run the optimization procedure
            t0 = perf_counter()
            opt.minimize(
                theta_current[param_key],
                max_iter=max_iter,
                disp=disp,
                callback=callback)
            t1 = perf_counter() - t0
            print('Finished optimizing ' + param_key + '. Elapsed time: ' + tp.humantime(t1))

            return opt.theta

        # print results based on the initial parameters
        print('\n')
        tp.banner('Initial parameters')
        update_results()

        try:

            # alternating optimization: switch between optimizing nonlinearities,
            # and optimizing filters
            for alt_iter in range(num_alt):

                # Fit filters
                print('\n')
                tp.banner('Fitting filters')

                # wrapper for the objective and gradient
                def f_df_wrapper(W, d):
                    return self.f_df(W, theta_current['f'], d, param_gradient='W')

                # run the optimization procedure for this parameter
                Wk = optimize_param(f_df_wrapper, 'W', check_grad, alt_iter + 0.5).copy()

                # normalize filters
                for filter_index in range(Wk.shape[0]):
                    theta_current['W'][filter_index] = utilities.nrm(Wk[filter_index])

                # print and save test results
                update_results()

                # Fit nonlinearity
                print('\n')
                tp.banner('Fitting nonlinearity')

                # wrapper for the objective and gradient
                def f_df_wrapper(f, d):
                    return self.f_df(theta_current['W'], f, d, param_gradient='f')

                # run the optimization procedure for this parameter
                theta_current['f'] = optimize_param(f_df_wrapper, 'f', check_grad, alt_iter + 1).copy()

                # print and save test results
                update_results()

        except KeyboardInterrupt:
            print('\nCleaning up... ')

        # store learned parameters
        self.theta = copy.deepcopy(theta_current)
        print('Done.\n')
Example #20
packet_list = []
packet_dic = {}
with open("output.json") as json_file:
    data_dict = json.load(json_file)

for x in range(len(data_dict)):

    if list(data_dict[x]['_source']['layers']['http'])[0] != '_ws.expert':
        request_method = list(data_dict[x]['_source']['layers']['http'])[0]
    else:
        request_method = list(data_dict[x]['_source']['layers']['http'])[1]

    packet_dic = {'No':'', 'requests':'', 'src':'', 'dst':'', 'host': '', 'blacklist_status':''}
    packet_dic['No'] = x
    packet_dic['requests'] = data_dict[x]['_source']['layers']['http'][request_method]['http.request.method']
    packet_dic['src'] = data_dict[x]['_source']['layers']['ip']['ip.src']
    packet_dic['dst'] = data_dict[x]['_source']['layers']['ip']['ip.dst']
    packet_dic['host'] = data_dict[x]['_source']['layers']['http']['http.host']
    packet_dic['blacklist_status'] = urlvoid.packet_check(packet_dic['host'])

    packet_list.append(packet_dic)

    time.sleep(0.9)

headers = ['No', 'Requests', 'SRC', 'DST', 'HOST', 'Blacklist Status']

rows = [list(a.values()) for a in packet_list]

tableprint.banner('Malware Traffic Analysis With Python')
tableprint.table(rows, headers)
Example #21
data = data[data.send_bet_status.str.contains('Accepted')]
del data['hilow_content']

# merge with result
data = data.merge(result[['event_id', 'result_corner']],
                  how='inner',
                  on='event_id')
data = data[~data.result_corner.isna()]
data['result_corner'] = data['result_corner'].apply(pd.to_numeric)
data['is_win'] = np.where(data.hilow == 'Low',
                          np.where(data.line > data.result_corner, 1, 0),
                          np.where(data.line < data.result_corner, 1, 0))
data['pnl'] = np.where(data.is_win == 1, data.odds - 1, -1)
data['pnl_actual'] = data.bet_amount * data.pnl

tp.banner("Bet Slips")
# table = []
# cols = ['event_id','bet_amount','hilow','line','is_win','pnl','pnl_actual']
# bet_slip = data[cols].tail(40).values.tolist()
# tp.table(bet_slip, cols)
print(
    "───────────────────────────────────────────────────────────────────────")
print(data[[
    'event_id', 'bet_amount', 'hilow', 'line', 'is_win', 'pnl', 'pnl_actual'
]].tail(40))
sentence_1 = '  Current running profit/loss: $ {} for $ 1 in each bet  '.format(
    round(data['pnl'].sum(), 2))
sentence_2 = '  Current running actual profit/loss: $ {} since starting stack  '.format(
    round(data['pnl_actual'].sum(), 2))
tp.banner(sentence_1)
tp.banner(sentence_2)
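
The commented-out block above sketches a tabulated alternative to the raw DataFrame print; uncommented, it would read:

cols = ['event_id', 'bet_amount', 'hilow', 'line', 'is_win', 'pnl', 'pnl_actual']
bet_slip = data[cols].tail(40).values.tolist()
tp.table(bet_slip, cols)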
Example #22
            #print (os.path.dirname(thefile))
            try:
                os.chdir(os.path.dirname(thefile))
            except BaseException:
                os.chdir(rootdir)

            file_num += 1
            count = count + numpages
            print('\n')

'''
print('='*50)
print('检查到 %s个文件共 '%file_num + str(count) + ' 页,其中:')
#print('-'*50)
print("\n")
print(tabulate([["A4",a4,"--->",a4],["A3",a3,"--->",a3*2],["A2",a2,"--->",a2*4],["A1",a1,"--->",a1*8],["A0",a0,"--->",a0*16]],headers=["幅面","页数","","折合A4"],tablefmt='orgtbl'))
#print('其中A4 %s页,A3 %s页, A2 %s页, A1 %s页, A0 %s页' % (a4, a3, a2, a1, a0))
#print('-'*50)
print("\n")
print('折算后总计: %s 页'%(a4+a3*2+a2*4+a1*8+a0*16))
print('-'*50)
'''

tableprint.banner('检查到 %s 个文件共 ' % file_num + str(count) + ' 页', style='clean')
header = ['幅面', '页数', '', '折合A4']
data = [["A4", a4, "--->", a4], ["A3", a3, "--->", a3 * 2], ["A2", a2, "--->", a2 * 4], ["A1", a1, "--->", a1 * 8], ["A0", a0, "--->", a0 * 16]]
tableprint.table(data, header)
tableprint.banner('图纸折算A4后总计 %s 页' % (a4 + a3 * 2 + a2 * 4 + a1 * 8 + a0 * 16), style='clean')
input()
Example #23
Escoga una opcion:
'''

states = {
    '1': 'chat',
    '2': 'away',
    '3': 'xa',
    '4': 'dnd'
}
flag = True
# used to decide which menu to show
login_check = False

while flag:
    if not login_check:
        tp.banner('Bienvenido al Server %s' % DOMAIN)
        opcion = input(menu_logging)
    else:
        tp.banner('<-------¿Que desea Realizar?------->')
        opcion = input(menu_interaction)

    if opcion == '1':
        if not login_check:
            tp.banner('<-------REGISTRO DE USUARIO------->')
            name = input('Ingrese el Nombre: ')
            username = input('Ingrese usuario: ')
            email = input('Ingrese email: ')
            password = input('Ingrese contraseña: ')
            jid = username + DOMAIN
            register = RegistrerUser(jid, password, name, email)
            if register.connect():
Example #24
def banner(msg, style='banner'):
    print('\033[92m', end='')  # ANSI escape: switch to bright green
    tp.banner(msg, width=60, style=style)
    print('\033[0m', end='')  # ANSI escape: reset colours
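
For instance, the wrapper is called just like tp.banner itself; the ANSI escape codes colour the frame green on VT100-compatible terminals:

banner('All checks passed')           # default double-line 'banner' frame
banner('Summary', style='clean')      # lighter 'clean' frame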
Example #25
import logging

def printOutput(output):
    tp.banner(output)
    logging.debug(output)
Example #26
def train(model, experiment, monitor, num_epochs, augment=False):
    """Train the given network against the given data

    Parameters
    ----------
    model : keras.models.Model or glms.GLM
        A GLM or Keras Model object

    experiment : experiments.Experiment
        An Experiment object

    monitor : io.Monitor
        Saves the model parameters and plots of performance progress

    num_epochs : int
        Number of epochs to train for

    augment : bool, optional
        Whether to augment the training data. Default: False

    """
    assert isinstance(model, (Model, GLM)), "'model' must be a GLM or Keras model"

    # initialize training iteration
    iteration = 0
    train_start = time()

    # loop over epochs
    try:
        for epoch in range(num_epochs):
            tp.banner('Epoch #{} of {}'.format(epoch + 1, num_epochs))
            print(tp.header(["Iteration", "Loss", "Runtime"]), flush=True)

            # loop over data batches for this epoch
            for X, y in experiment.train(shuffle=True):

                # update on save_every, assuming it is positive
                if (monitor is not None) and (iteration % monitor.save_every == 0):

                    # performs validation, updates performance plots, saves results to dropbox
                    monitor.save(epoch, iteration, X, y, model.predict)

                # train on the batch
                tstart = time()
                loss = model.train_on_batch({'stim':X, 'loss':y})[0]
                elapsed_time = time() - tstart

                # update
                iteration += 1
                print(tp.row([iteration, float(loss), tp.humantime(elapsed_time)]), flush=True)

            print(tp.bottom(3))

    except KeyboardInterrupt:
        print('\nCleaning up')

    # allows the monitor to perform any post-training visualization
    if monitor is not None:
        elapsed_time = time() - train_start
        monitor.cleanup(iteration, elapsed_time)

    tp.banner('Training complete!')
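
The header/row/bottom trio used above is tableprint's streaming API; a self-contained sketch of the same idiom outside a training loop:

import time
import tableprint as tp

print(tp.header(['Iteration', 'Loss', 'Runtime']))
for i in range(3):
    t0 = time.time()
    loss = 1.0 / (i + 1)  # stand-in value for demonstration
    print(tp.row([i, loss, tp.humantime(time.time() - t0)]), flush=True)
print(tp.bottom(3))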
Example #27
def fit_glm(ci, exptdate, stimulus, cutout=7, history=40):
    tp.banner(f'Training GLM, expt {exptdate}, {stimulus}, cell {ci+1:02d}')

    # load experiment
    expt = experiments.loadexpt(exptdate, (ci, ),
                                stimulus,
                                'train',
                                history,
                                5000,
                                cutout_width=cutout)

    tf.reset_default_graph()

    stim = tf.placeholder(tf.float32,
                          shape=(None, *expt.X.shape[1:]),
                          name='stimulus')
    hist = tf.placeholder(tf.float32,
                          shape=(None, *expt.spkhist.shape[1:]),
                          name='spike_history')
    rate = tf.placeholder(tf.float32, (None, 1), name='firing_rate')
    graph = Graph(stim, hist, rate)

    # define model
    model = GLM(l2_filter=1e-4, l2_hist=1e-2)
    pred = model(stim, hist)

    # neg. log-likelihood loss
    epsilon = 1e-6
    dt = 1e-2
    loss = dt * tf.reduce_mean(pred - rate * tf.log(pred + epsilon))

    # training
    opt = tf.train.AdamOptimizer(learning_rate=1e-3)
    regs = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    reg_total = tf.reduce_sum(regs)
    train_op = opt.minimize(loss + reg_total)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    metrics = {
        'loss': loss,
        'filter_norm': tf.nn.l2_loss(model.get_variables()[0]),
        'hist_norm': tf.nn.l2_loss(model.get_variables()[1]),
        'reg_total': reg_total,
        'cc': tf.squeeze(cc(pred, rate)),
        'fev': tf.squeeze(fev(pred, rate)),
        'rmse': tf.squeeze(rmse(pred, rate)),
    }
    for reg in regs:
        if 'spike_history' in reg.name:
            if 'w' in reg.name:
                metrics['l2_hist'] = reg
            else:
                metrics['l2_offset'] = reg
        if 'filter' in reg.name:
            metrics['l2_filter'] = reg

    store = defaultdict(list)

    feed = datafeed(expt, graph, niter=1000)
    for fd in tqdm(feed):
        res = sess.run([train_op, metrics], feed_dict=fd)[1]
        for k, v in res.items():
            store[k].append(v)

    # get parameters
    weights, whist, offset = sess.run(model.get_variables())
    sta = weights.reshape(history, cutout * 2, cutout * 2)
    whist = whist.ravel()

    testdata = experiments.loadexpt(exptdate, (ci, ),
                                    stimulus,
                                    'test',
                                    history,
                                    5000,
                                    cutout_width=cutout)
    test_feed = next(datafeed(testdata, graph, batchsize=None))
    test_pred = sess.run(pred, feed_dict=test_feed)[:, 0]
    test_rate = testdata.y

    # test metrics
    test = {
        'cc': np_wrap(cc)(test_rate, test_pred),
        'rmse': np_wrap(rmse)(test_rate, test_pred),
        'fev': np_wrap(fev)(test_rate, test_pred),
    }

    # save
    results = {
        'stimulus_filter': sta,
        'history_filter': whist,
        'bias': offset,
        'test_scores': test,
        'train_store': dict(store),
        'test_pred': test_pred,
        'test_rate': test_rate,
    }

    basedir = os.path.expanduser('~/research/deep-retina/results/glms/')
    folder = os.path.join(basedir, f'{exptdate}_{stimulus}')
    try:
        os.mkdir(folder)
    except FileExistsError:
        pass
    dd.io.save(os.path.join(folder, f'cell{ci:02d}.h5'), results)
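
datafeed is not shown in this snippet; a plausible sketch, assuming Graph is a namedtuple of the three placeholders and that a falsy batchsize means "use the full dataset":

import numpy as np

def datafeed(expt, graph, niter=1000, batchsize=512):
    # Hypothetical sketch: yield feed dicts of random minibatches.
    n = len(expt.y)
    for _ in range(niter):
        idx = np.random.choice(n, batchsize) if batchsize else slice(None)
        yield {graph.stim: expt.X[idx],
               graph.hist: expt.spkhist[idx],
               graph.rate: expt.y[idx].reshape(-1, 1)}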