Code example #1
    def download_node_info(self, amount):
        """Query each node's /node/id endpoint and record its id -> address mapping."""
        for i in range(amount):
            logging.debug(str(nowtime()) + ' About to get node')
            node_info = requests.get('http://0.0.0.0:' + str(i + 5000) + '/node/id')
            self.nodes[node_info.json()['node']] = node_info.json()['address']
        logging.debug(str(nowtime()) + ' All nodes downloaded')
        logging.debug(self.nodes)
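This method is a class fragment and its imports are not shown. A minimal sketch of the assumed context; the `nowtime` alias follows code example #9 below, and `self.nodes` is assumed to be a dict created in the class's `__init__`:

import logging
import requests
from time import time as nowtime  # alias as used in code example #9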
Code example #2
def shutdown_server(amount):
    """Ask each node to shut down, then stop the manager's own Werkzeug dev server."""
    logging.debug(str(nowtime()) + ' Shutting down ' + str(amount) + ' nodes')
    for i in range(amount):
        requests.get('http://0.0.0.0:' + str(i + 5000) + '/shutdown')
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
    logging.debug(str(nowtime()) + ' Manager shut')
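Because the function reads `request.environ`, it has to run inside a Flask request context. A hedged sketch of how it might be wired up; the route path, app object, and node count are assumptions not shown in the original:

from flask import Flask, request

app = Flask(__name__)
NODE_COUNT = 3  # hypothetical number of worker nodes started by the manager

@app.route('/shutdown')
def shutdown():
    # Hypothetical wiring; the original registration code is not shown.
    shutdown_server(NODE_COUNT)
    return 'Manager shutting down...'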
Code example #3
    def begin_sending(self, amount):
        # TODO: read 'important_script.rtf' and add a single word to each block:
        # with open('important_script.rtf', 'r') as f:
        #     read_data = f.read()
        # for word in read_data:
        for n in range(amount):
            self.send_to_mine('0.0.0.0:' + str(5000 + n))
            logging.debug(str(nowtime()) + ' Sent mining request')
            time.sleep(random.randrange(1, 3))
Code example #4
def start(amount):
    processes = {}  # placeholder for server-side process/thread management

    # logging setup
    cwd = os.getcwd()
    LOG_FILE = cwd + '/Logs/' + str(nowtime()) + 'networkmngr' + '.log'
    logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG)

    for i in range(amount):
        # Start each node in the background on its own port.
        pargz = ['pipenv', 'run', 'python', 'Node.py', '-p', str(5000 + i)]
        subprocess.Popen(pargz)
        logging.debug(str(nowtime()) + ' Created a process ' + str(i))
    # Delay to let the miners wake up to full operating mode.
    time.sleep(5)
    node.download_node_info(amount)  # 'node' is the manager instance defined elsewhere
    node.register_to_all(amount)
    mine_requests = 10000
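The snippet ends mid-function. A hypothetical entry point, not part of the original, that would launch a small local network:

if __name__ == '__main__':
    import sys
    # Hypothetical: default to 3 nodes when no count is given on the command line.
    start(int(sys.argv[1]) if len(sys.argv) > 1 else 3)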
Code example #5
def main():
    """
    Main program.
    """
    cursor, db = init_mysql_connect()
    ds_queue = vaild_ds_list()
    start_time = nowtime()
    while True:
        if nowtime() - start_time > 60*60*24:
            return False
        ds_id = ds_queue.pop(0)
        ds_info = tassadar_api(cursor, ds_id, db)
        print(ds_info)
        if ds_info['status'] in (1, 0):
            if not vaild_time(ds_info['utime']):
                sleep(TIMESEELP*10)
                ds_queue.append(ds_id)
                print('This data source has not synced today: {}, waiting 600s'.format(ds_id))
                # info().info('This data source has not synced today: {}, waiting 600s'.format(ds_id))
                continue
            else:
                # info().info('This data source synced successfully: {}'.format(ds_id))
                pass
        elif ds_info['status'] == 2:
            if not vaild_time(ds_info['utime']):
                sleep(TIMESEELP*10)
                ds_queue.append(ds_id)
                print('This data source has not synced today: {}, waiting 600s'.format(ds_id))
                # info().info('This data source failed its last sync and has not synced today: {}, waiting 600s'.format(ds_id))
                continue
            else:
                # info().info('This data source failed to sync today: {}'.format(ds_id))
                return False
        elif ds_info['status'] == 3:
            sleep(TIMESEELP)
            ds_queue.append(ds_id)
            # info().info('This data source is currently syncing: {}, waiting 60s'.format(ds_id))
            continue
        if len(ds_queue) == 0:
            break
    close(db)
    return True
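For reference, a hedged sketch of the record `tassadar_api` appears to return, inferred only from the fields the loop reads; the status-code meanings come from the original comments:

# Inferred shape of ds_info; field names and codes are taken from the usage above.
ds_info = {
    'status': 1,                     # 1/0: synced, 2: last sync failed, 3: sync in progress
    'utime': '2024-01-01 08:00:00',  # last-sync timestamp, checked by vaild_time()
}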
Code example #6
def vaild_time(utime):
    """
    判断utime时间是否是今天,是返回True
    """
    ds_struct_time = strptime(utime, '%Y-%m-%d %H:%M:%S')
    ds_sec = mktime(ds_struct_time)

    now_sec = nowtime()
    now_struct_time = localtime(now_sec)

    if now_sec - ds_sec > 60*60*24:
        return False
    elif not ds_struct_time.tm_mday == now_struct_time.tm_mday:
        return False
    return True
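A minimal usage sketch, assuming the helpers used above come from the standard time module (with time aliased as nowtime, as in code example #9); the timestamp literal is only an example:

from time import strptime, mktime, localtime, time as nowtime

print(vaild_time('2024-01-01 08:00:00'))  # True only when run on 2024-01-01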
Code example #7
    def register_to_all(self, amount):
        for i in range(amount):
            r = requests.post('http://0.0.0.0:' + str(i + 5000) + '/nodes/register',
                              json=json.dumps(self.format_to_register(self.nodes)))
        logging.debug(str(nowtime()) + ' Nodes list sent to all nodes')
Code example #8
    def run_graph(self, train_data, val_data, test_data, early_stop=False, bad_epoch_tol=0, verbose=True):
        '''
        Build and train the header-bid regression graph.

        self.k is the dimensionality of the embedding and must be >= 0;
        when k == 0 the model is purely linear, otherwise it is factorized.

        :param train_data, val_data, test_data: datasets exposing make_sparse_batch()
        :param early_stop: stop when the validation loss has not improved for
            bad_epoch_tol consecutive epochs
        :param verbose: print per-batch losses during the first epoch
        :return:
        '''
        tf.reset_default_graph()

        num_features = train_data.num_features()

        # INPUTs
        feature_indice = tf.placeholder(tf.int32, name='feature_indice')
        feature_values = tf.placeholder(tf.float32, name='feature_values')
        header_bids_true = tf.placeholder(tf.float32, name='header_bids')

        # shape: (batch_size, max_nonzero_len)
        embeddings_linear = tf.Variable(tf.truncated_normal(shape=(num_features,), mean=0.0, stddev=1e-5))
        filtered_embeddings_linear = tf.nn.embedding_lookup(embeddings_linear, feature_indice) * feature_values
        intercept = tf.Variable(1e-5)
        location = self.linear_function(filtered_embeddings_linear, intercept)

        embeddings_factorized = None
        filtered_embeddings_factorized = None
        if self.k > 0:
            # shape: (batch_size, max_nonzero_len, k)
            embeddings_factorized = tf.Variable(tf.truncated_normal(shape=(num_features, self.k), mean=0.0, stddev=1e-5))
            filtered_embeddings_factorized = tf.nn.embedding_lookup(embeddings_factorized, feature_indice) * \
                                      tf.tile(tf.expand_dims(feature_values, axis=-1), [1, 1, 1])
            factorized_term = self.factorization_machines(filtered_embeddings_factorized)
            location += factorized_term

        scale = tf.Variable(1.0)
        positive_scale = tf.square(scale) + 1e-6
        log_prob = self.gumbelPDF(header_bids_true, location, positive_scale)
        neg_log_likelihood = tf.reduce_sum(log_prob)

        header_bids_pred = location
        # Optionally shift by the Gumbel mean offset (0.5772 is the Euler-Mascheroni constant):
        # header_bids_pred = location + positive_scale * tf.constant(0.5772)

        batch_loss = tf.losses.mean_squared_error(labels=header_bids_true,
                                                      predictions=header_bids_pred,
                                                      reduction = tf.losses.Reduction.MEAN)
        running_loss, loss_update = tf.metrics.mean(batch_loss)


        # L2 regularized sum of squares loss function over the embeddings
        l2_norm = self.lambda_linear * tf.nn.l2_loss(filtered_embeddings_linear)
        if embeddings_factorized is not None:
            l2_norm += self.lambda_factorized * tf.nn.l2_loss(filtered_embeddings_factorized)

        loss_mean = batch_loss

        training_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(neg_log_likelihood + l2_norm)

        ### gradient clipping
        # optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        # gradients, variables = zip(*optimizer.compute_gradients(loss_mean))
        # gradients_clipped, _ = tf.clip_by_global_norm(gradients, 5.0)
        # training_op = optimizer.apply_gradients(zip(gradients_clipped, variables))


        # Isolate the variables stored behind the scenes by the metric operation
        running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
        # Define initializer to initialize/reset running variables
        running_vars_initializer = tf.variables_initializer(var_list=running_vars)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())


        with tf.Session() as sess:
            init.run()

            max_loss_val = None  # tracks the best (lowest) validation loss seen so far
            current_bad_epochs = 0
            num_total_batches = int(np.ceil(train_data.num_instances() / self.batch_size))
            for epoch in range(1, self.num_epochs + 1):
                sess.run(running_vars_initializer)
                ''' model training '''
                num_batch = 0
                start = nowtime()
                for hb_batch, featidx_batch, featval_batch in train_data.make_sparse_batch(self.batch_size):
                    num_batch += 1

                    # print(hb_batch)
                    # print(featidx_batch)
                    # print(featval_batch)

                    _, loss_batch, hb_pred, hb_true, pos_scale = sess.run([training_op, loss_mean, header_bids_pred,
                                                                header_bids_true, positive_scale],
                                             feed_dict={
                                             'feature_indice:0': featidx_batch,
                                             'feature_values:0': featval_batch,
                                             'header_bids:0': hb_batch})

                    # print(pos_scale)
                    # print(hb_pred)

                    if verbose and epoch == 1:
                        print("Epoch %d - Batch %d/%d: batch loss = %.4f" %
                              (epoch, num_batch, num_total_batches, loss_batch))
                        print("\t\t\t\ttime: %.4fs" % (nowtime() - start))
                        start = nowtime()

                # evaluation on training data
                eval_nodes_update = [loss_update, neg_log_likelihood, header_bids_pred]
                eval_nodes_metric = [running_loss]
                print()
                print("========== Evaluation at Epoch %d ==========" % epoch)
                print('*** On Training Set:')
                [loss_train], _, _ = self.evaluate(train_data.make_sparse_batch(),
                                                                 running_vars_initializer, sess,
                                                                 eval_nodes_update, eval_nodes_metric,
                                                                 )
                print("TENSORFLOW:\tMSE = %.6f" % loss_train)

                # evaluation on validation data
                print('*** On Validation Set:')
                [loss_val], hb_pred_val, hb_true_val = self.evaluate(val_data.make_sparse_batch(),
                                                           running_vars_initializer, sess,
                                                           eval_nodes_update, eval_nodes_metric,
                                                           )
                print("TENSORFLOW:\tMSE = %.6f" % loss_val)

                # print(loss_val, max_loss_val)
                if max_loss_val is None or loss_val < max_loss_val:
                    current_bad_epochs = 0
                    print("!!! GET THE LOWEST VAL LOSS !!!")
                    max_loss_val = loss_val

                    # evaluation on test data
                    print('*** On Test Set:')
                    [loss_test], hb_pred_test, hb_true_test = self.evaluate(test_data.make_sparse_batch(),
                                                                            running_vars_initializer, sess,
                                                                            eval_nodes_update, eval_nodes_metric,
                                                                            )
                    print("TENSORFLOW:\tMSE = %.6f" % loss_test)

                    prediction_result = pd.DataFrame(
                        {'y_pred': hb_pred_test,
                         'y_true': hb_true_test
                         })
                    prediction_result.to_pickle(os.path.join(INPUT_DIR, OUTPUT_PKL_NAME % self.hb_agent_name))

                elif early_stop:
                    current_bad_epochs += 1
                    if current_bad_epochs == bad_epoch_tol:
                        break
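The helper `self.gumbelPDF` is not shown in this snippet. A hedged sketch of what it presumably computes, given that its summed output is used as `neg_log_likelihood`: the per-example negative log-density of a Gumbel(location, scale) distribution. The method body below is an assumption, not the author's implementation:

    # Hypothetical sketch: -log f(x) = log(scale) + z + exp(-z), with z = (x - location) / scale
    def gumbelPDF(self, x, location, scale):
        z = (x - location) / scale
        return tf.log(scale) + z + tf.exp(-z)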
Code example #9
import yaml
import jinja2
from netmiko import ConnectHandler
from time import time as nowtime

starttime = nowtime()

with open("devices.yaml", "r") as f:
    devices = yaml.safe_load(f)

with open("template.j2", "r") as f:
    template = jinja2.Template(f.read())

for device in devices:

    conn = ConnectHandler(
        host=device["address"],
        username="******",
        password="******",
        device_type="cisco_ios",
    )

    conf_to_send = template.render(device=device).splitlines()
    conn.send_config_set(conf_to_send)
    conn.disconnect()  # close the SSH session before moving on to the next device

runtime = nowtime() - starttime
print(f"Took {runtime} seconds to complete")
Code example #10
    def run_graph(self,
                  num_features,
                  train_data,
                  val_data,
                  test_data,
                  sample_weights=None):
        '''
        Build and train the survival-analysis graph.

        self.k is the dimensionality of the embedding and must be >= 0;
        when k == 0 the model is purely linear, otherwise it is factorized.

        :param num_features: number of sparse input features
        :param train_data, val_data, test_data: datasets exposing make_sparse_batch()
        :param sample_weights: None, or 'time' to weight each example by its time value
        :return:
        '''
        # INPUTs
        feature_indice = tf.placeholder(tf.int32, name='feature_indice')
        feature_values = tf.placeholder(tf.float32, name='feature_values')

        min_hbs = tf.placeholder(tf.float32,
                                 name='min_headerbids')  # for regularization
        max_hbs = tf.placeholder(tf.float32,
                                 name='max_headerbids')  # for regularization

        times = tf.placeholder(tf.float32, shape=[None], name='times')
        events = tf.placeholder(tf.int32, shape=[None], name='events')

        # shape: (batch_size, max_nonzero_len)
        embeddings_linear = tf.Variable(
            tf.truncated_normal(shape=(num_features, ), mean=0.0, stddev=1e-5))
        filtered_embeddings_linear = tf.nn.embedding_lookup(
            embeddings_linear, feature_indice) * feature_values
        intercept = tf.Variable(1e-5)
        linear_term = self.linear_function(filtered_embeddings_linear,
                                           intercept)
        scale = linear_term

        embeddings_factorized = None
        filtered_embeddings_factorized = None
        if self.k > 0:
            # shape: (batch_size, max_nonzero_len, k)
            embeddings_factorized = tf.Variable(
                tf.truncated_normal(shape=(num_features, self.k),
                                    mean=0.0,
                                    stddev=1e-5))
            filtered_embeddings_factorized = tf.nn.embedding_lookup(embeddings_factorized, feature_indice) * \
                                      tf.tile(tf.expand_dims(feature_values, axis=-1), [1, 1, 1])
            factorized_term = self.factorization_machines(
                filtered_embeddings_factorized)
            scale += factorized_term

        scale = tf.nn.softplus(scale)
        ''' 
        if event == 0, right-censoring
        if event == 1, left-censoring 
        '''
        shape = tf.Variable(0.2, trainable=True)
        not_survival_proba = self.distribution.left_censoring(
            times, scale, shape)  # the left area

        not_survival_bin = tf.where(tf.greater_equal(not_survival_proba, 0.5),
                                    tf.ones(tf.shape(not_survival_proba)),
                                    tf.zeros(tf.shape(not_survival_proba)))

        running_acc, acc_update = None, None
        if not sample_weights:
            running_acc, acc_update = tf.metrics.accuracy(
                labels=events, predictions=not_survival_bin)
        elif sample_weights == 'time':
            running_acc, acc_update = tf.metrics.accuracy(
                labels=events, predictions=not_survival_bin, weights=times)

        batch_loss = None
        if not sample_weights:
            batch_loss = tf.losses.log_loss(labels=events,
                                            predictions=not_survival_proba,
                                            reduction=tf.losses.Reduction.MEAN)
        elif sample_weights == 'time':
            # class_weights = tf.where(tf.equal(events, 1),
            #                             tf.ones(tf.shape(events)) * 100,
            #                             tf.ones(tf.shape(events)))
            batch_loss = tf.losses.log_loss(labels=events,
                                            predictions=not_survival_proba,
                                            weights=times,
                                            reduction=tf.losses.Reduction.MEAN)
        running_loss, loss_update = tf.metrics.mean(batch_loss)

        # Header Bidding Regularization
        hb_adxwon_partitions = tf.cast(
            tf.logical_and(
                tf.equal(events, 0),  # adx won
                tf.logical_and(
                    tf.not_equal(0.0, max_hbs),  # the max_hb is not missing
                    tf.less(times, max_hbs)  # the max hb > the revenue
                    # Alternative conditions experimented with:
                    # tf.less(times, min_hbs)
                    # tf.less((max_hbs - times) / times, 0.01)
                    # tf.less(times, 10.0)
                )),
            tf.int32)
        hb_adxlose_partitions = tf.cast(
            tf.logical_and(
                tf.equal(events, 1),  # adx lose
                tf.logical_and(
                    tf.not_equal(0.0, min_hbs),  # the min_hb is not missing
                    tf.less(min_hbs, times)  # the min hb < the floor
                    # Alternative conditions experimented with:
                    # tf.less(max_hbs, times)
                    # tf.less(0.9, (times - min_hbs) / times)
                    # tf.less(times, 10.0)
                )),
            tf.int32)

        # Using boolean_mask instead of dynamic_partition leads to:
        # "UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory."
        # https://stackoverflow.com/questions/44380727/get-userwarning-while-i-use-tf-boolean-mask?noredirect=1&lq=1
        regable_hb_adxwon = tf.dynamic_partition(max_hbs, hb_adxwon_partitions,
                                                 2)[1]
        regable_hb_adxlose = tf.dynamic_partition(min_hbs,
                                                  hb_adxlose_partitions, 2)[1]
        regable_scale_adxwon = tf.dynamic_partition(scale,
                                                    hb_adxwon_partitions, 2)[1]
        regable_scale_adxlose = tf.dynamic_partition(scale,
                                                     hb_adxlose_partitions,
                                                     2)[1]

        hb_adxwon_pred = self.distribution.left_censoring(
            regable_hb_adxwon, regable_scale_adxwon, shape)
        hb_adxlose_pred = self.distribution.left_censoring(
            regable_hb_adxlose, regable_scale_adxlose, shape)

        hb_reg_adxwon, hb_reg_adxlose = None, None
        if not sample_weights:
            # if True:
            hb_reg_adxwon = tf.losses.log_loss(labels=tf.zeros(
                tf.shape(hb_adxwon_pred)),
                                               predictions=hb_adxwon_pred)
            hb_reg_adxlose = tf.losses.log_loss(labels=tf.zeros(
                tf.shape(hb_adxlose_pred)),
                                                predictions=hb_adxlose_pred)
        elif sample_weights == 'time':
            regable_time_adxwon = tf.dynamic_partition(times,
                                                       hb_adxwon_partitions,
                                                       2)[1]
            regable_time_adxlose = tf.dynamic_partition(
                times, hb_adxlose_partitions, 2)[1]
            hb_reg_adxwon = tf.losses.log_loss(
                labels=tf.ones(tf.shape(hb_adxwon_pred)),
                predictions=hb_adxwon_pred,
                weights=1.0 / regable_time_adxwon)
            hb_reg_adxlose = tf.losses.log_loss(
                labels=tf.zeros(tf.shape(hb_adxlose_pred)),
                predictions=hb_adxlose_pred,
                weights=1.0 / regable_time_adxlose)
        mean_hb_reg_adxwon = tf.reduce_mean(hb_reg_adxwon)
        mean_hb_reg_adxlose = tf.reduce_mean(hb_reg_adxlose)

        # L2 regularized sum of squares loss function over the embeddings
        '''
        l2_norm = tf.constant(self.lambda_linear) * tf.pow(embeddings_linear, 2)
        if embeddings_factorized is not None:
            l2_norm += tf.reduce_sum(tf.pow(embeddings_factorized, 2), axis=-1)
        sum_l2_norm = tf.constant(self.lambda_factorized) * tf.reduce_sum(l2_norm)
        '''
        l2_norm = self.lambda_linear * tf.nn.l2_loss(
            filtered_embeddings_linear)
        if embeddings_factorized is not None:
            l2_norm += self.lambda_factorized * tf.nn.l2_loss(
                filtered_embeddings_factorized)


        loss_mean = batch_loss + \
                    tf.constant(self.lambda_hb_adxwon) * mean_hb_reg_adxwon + \
                    tf.constant(self.lambda_hb_adxlose) * mean_hb_reg_adxlose + \
                    l2_norm
        # training_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss_mean)

        ### gradient clipping
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        gradients, variables = zip(*optimizer.compute_gradients(loss_mean))
        gradients_clipped, _ = tf.clip_by_global_norm(gradients, 5.0)
        training_op = optimizer.apply_gradients(
            zip(gradients_clipped, variables))

        # Isolate the variables stored behind the scenes by the metric operation
        running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
        # Define initializer to initialize/reset running variables
        running_vars_initializer = tf.variables_initializer(
            var_list=running_vars)

        init = tf.group(tf.global_variables_initializer(),
                        tf.local_variables_initializer())

        with tf.Session() as sess:
            init.run()

            max_loss_val = None  # tracks the best (lowest) validation loss seen so far

            num_total_batches = int(
                np.ceil(train_data.num_instances / self.batch_size))
            for epoch in range(1, self.num_epochs + 1):
                sess.run(running_vars_initializer)
                # model training
                num_batch = 0
                start = nowtime()
                for time_batch, event_batch, featidx_batch, featval_batch, minhbs_batch, maxhbs_batch, max_nz_len \
                        in train_data.make_sparse_batch(self.batch_size, only_freq=ONLY_FREQ_TRAIN):

                    num_batch += 1

                    _, loss_batch, _, event_batch, time_batch, shape_batch = sess.run(
                        [
                            training_op, loss_mean, acc_update, events, times,
                            shape
                        ],
                        feed_dict={
                            'feature_indice:0': featidx_batch,
                            'feature_values:0': featval_batch,
                            'min_headerbids:0': minhbs_batch,
                            'max_headerbids:0': maxhbs_batch,
                            'times:0': time_batch,
                            'events:0': event_batch
                        })

                    # print()
                    # print('mean_hb_reg_adxwon_batch')
                    # print(mean_hb_reg_adxwon_batch)
                    # print('mean_hb_reg_adxlose_batch')
                    # print(mean_hb_reg_adxlose_batch)
                    # print('mean_batch_loss_batch')
                    # print(mean_batch_loss_batch)
                    # print("event_batch")
                    # print(event_batch)
                    # print('shape_batch')
                    # print(shape_batch)

                    if epoch == 1:
                        print(
                            "Epoch %d - Batch %d/%d: batch loss = %.4f" %
                            (epoch, num_batch, num_total_batches, loss_batch))
                        print("                         time: %.4fs" %
                              (nowtime() - start))
                        start = nowtime()

                # evaluation on training data
                eval_nodes_update = [
                    loss_update, acc_update, not_survival_proba, scale, max_hbs
                ]
                eval_nodes_metric = [running_loss, running_acc]
                print()
                print("========== Evaluation at Epoch %d ==========" % epoch)
                print('*** On Training Set:')
                (loss_train, acc_train), _, _, _, _, _ = self.evaluate(
                    train_data.make_sparse_batch(only_freq=ONLY_FREQ_TEST),
                    running_vars_initializer, sess, eval_nodes_update,
                    eval_nodes_metric, sample_weights)
                # print("TENSORFLOW:\tloss = %.6f\taccuracy = %.4f" % (loss_train, acc_train))

                # evaluation on validation data
                print('*** On Validation Set:')
                (
                    loss_val, acc_val
                ), not_survival_val, _, _, events_val, times_val = self.evaluate(
                    val_data.make_sparse_batch(only_freq=ONLY_FREQ_TEST),
                    running_vars_initializer, sess, eval_nodes_update,
                    eval_nodes_metric, sample_weights)
                # print("TENSORFLOW:\tloss = %.6f\taccuracy = %.4f" % (loss_val, acc_val))
                print("Validation C-Index = %.4f" %
                      c_index(events_val, not_survival_val, times_val))

                if max_loss_val is None or loss_val < max_loss_val:
                    print("!!! GET THE LOWEST VAL LOSS !!!")
                    max_loss_val = loss_val

                    # evaluation on test data
                    print('*** On Test Set:')
                    (
                        loss_test, acc_test
                    ), not_survival_test, scale_test, max_hbs_test, events_test, times_test = self.evaluate(
                        test_data.make_sparse_batch(only_freq=ONLY_FREQ_TEST),
                        running_vars_initializer, sess, eval_nodes_update,
                        eval_nodes_metric, sample_weights)
                    # print("TENSORFLOW:\tloss = %.6f\taccuracy = %.4f" % (loss_test, acc_test))
                    print("TEST C-Index = %.4f" %
                          c_index(events_test, not_survival_test, times_test))

                    # Store prediction results
                    with open('output/all_predictions_factorized.csv',
                              'w',
                              newline="\n") as outfile:
                        csv_writer = csv.writer(outfile)
                        csv_writer.writerow(('NOT_SURV_PROB', 'EVENTS',
                                             'MAX(RESERVE, REVENUE)', 'MAX_HB',
                                             'SCALE', 'SHAPE'))
                        sh = shape.eval()
                        for p, e, t, h, sc in zip(not_survival_test,
                                                  events_test, times_test,
                                                  max_hbs_test, scale_test):
                            csv_writer.writerow((p, e, t, h, sc, sh))
                    print('All predictions are outputted for error analysis')

                    # Store parameters
                    params = {
                        'embeddings_linear': embeddings_linear.eval(),
                        'intercept': intercept.eval(),
                        'shape': shape.eval(),
                        'distribution_name': type(self.distribution).__name__
                    }
                    if embeddings_factorized is not None:
                        params['embeddings_factorized'] = embeddings_factorized.eval()
                    pickle.dump(params,
                                open('output/params_k%d.pkl' % self.k, 'wb'))
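A small sketch of reading the stored parameters back. The key names follow the dump above; the value of k is whatever was used for training (2 here is only a placeholder):

import pickle

k = 2  # placeholder; must match the k the model was trained with
with open('output/params_k%d.pkl' % k, 'rb') as f:
    params = pickle.load(f)
print(params['distribution_name'], params['shape'])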