Example #1
    def __init__(self, database_settings):

        self.settings = database_settings

        log_furl = get_logger('HcpReader').handlers[0].stream.name
        set_logger('HcpDownloader',
                   database_settings['LOGGING']['downloader_level'], log_furl)
        self.logger = get_logger('HcpDownloader')
Example #2
    def __init__(self, database_settings):

        self.base_path = database_settings['SERVERS']['dti_server_url']
        self.local_path = database_settings['DIRECTORIES'][
            'local_server_directory']

        log_furl = get_logger('HcpReader').handlers[0].stream.name
        set_logger('DtiDownloader',
                   database_settings['LOGGING']['downloader_level'], log_furl)
        self.logger = get_logger('DtiDownloader')
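
Examples #1 and #2 assume project-local get_logger/set_logger helpers rather than the stdlib API: the handlers[0].stream.name trick only works if the named logger already carries a FileHandler (whose stream's .name is the log path), and Example #6 relies on set_logger returning the logger it configures. A minimal sketch consistent with that usage and with the WARNING default asserted in Example #8 (the formatter string is an assumption):

import logging

def get_logger(name):
    # Return the named logger, defaulting its level to WARNING
    # (Example #8 asserts this default).
    logger = logging.getLogger(name)
    if logger.level == logging.NOTSET:
        logger.setLevel(logging.WARNING)
    return logger

def set_logger(name, level, log_furl):
    # Point the named logger at a file and set its threshold; the handler's
    # stream.name is then the path that Examples #1 and #2 read back.
    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.FileHandler(log_furl)
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    return logger  # returned so callers can assign it, as Example #6 does
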
Example #3
def test_logger_log(caplog):
    logger = get_logger(__name__)
    logger.error("This is an error")
    logger.warning("This is a warning")
    logger.info("This is an informational message")
    logger.debug("This is a debug message")
    assert "This is an error" in caplog.text
    assert "This is a warning" in caplog.text
    assert "This is an informational message" not in caplog.text
    assert "This is a debug message" not in caplog.text
Example #4
    def __init__(self, settings, params):

        self.logger = get_logger('HcpDataset')
        self.local_folder = settings['DIRECTORIES']['local_server_directory']
        self.parc = params['PARCELLATION']['parcellation']
        self.inflation = params['SURFACE']['inflation']
        self.tr = float(params['FMRI']['tr'])
        self.physio_sampling_rate = int(params['FMRI']['physio_sampling_rate'])
        self.regress_physio = params['FMRI']['regress_physio']

        list_file = 'subjects_inter.txt'
        list_url = os.path.join(get_root(), 'conf', list_file)
        subjects_strut = load_subjects(list_url)

        structural_file = 'struct_dti.mat'
        structural_url = os.path.join(get_root(), 'conf', 'hcpdata',
                                      structural_file)
        self.S = load_strucutural(subjects_strut, structural_url)
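
load_subjects and load_strucutural are project helpers that the listing does not show. A plausible sketch of the first, assuming subjects_inter.txt holds one subject ID per line (the structural loader would presumably go through scipy.io.loadmat):

def load_subjects(list_url):
    # One subject ID per line; skip blank lines.
    with open(list_url) as f:
        return [line.strip() for line in f if line.strip()]
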
Example #5
    def __init__(self, params, device, regime, coarsen=None):

        database_settings = get_database_settings()

        log_furl = os.path.join(params['FILE']['experiment_path'], 'log', 'downloader.log')
        set_logger('HcpDataset', database_settings['LOGGING']['dataloader_level'], log_furl)
        self.logger = get_logger('HcpDataset')
        self.logger.info('*** starting new {:} dataset'.format(regime))

        self.device = device
        self.session = params['SESSION'][regime]

        self.reader = HcpReader(database_settings, params)

        list_url = os.path.join(params['FILE']['experiment_path'], 'conf', regime, self.session, 'subjects.txt')
        self.subjects = self.reader.load_subject_list(list_url)

        if coarsen is None:
            coarsen = TrivialCoarsening()
        self.coarsen = coarsen

        self.transform = SlidingWindow(params['TIME_SERIES'], coarsen=coarsen)
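
The coarsen=None parameter with an in-body default is the usual way to avoid sharing one default instance across calls; TrivialCoarsening presumably acts as an identity pass-through. A minimal sketch of that shape (the class body is an assumption):

class TrivialCoarsening:
    # Identity coarsening: hand the input back unchanged.
    def __call__(self, x):
        return x
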
Example #6
    def __init__(self, database_settings, params):

        log_furl = os.path.join(params['FILE']['experiment_path'], 'log',
                                'downloader.log')
        set_logger('HcpReader',
                   database_settings['LOGGING']['dataloader_level'], log_furl)
        self.logger = get_logger('HcpReader')

        self.local_folder = database_settings['DIRECTORIES'][
            'local_server_directory']
        self.delete_nii = strtobool(
            database_settings['DIRECTORIES']['delete_after_downloading'])
        self.hcp_downloader = HcpDownloader(database_settings)
        self.dti_downloader = DtiDownloader(database_settings)
        nib.imageglobals.logger = set_logger(
            'Nibabel', database_settings['LOGGING']['nibabel_level'], log_furl)

        self.parcellation = params['PARCELLATION']['parcellation']
        self.inflation = params['SURFACE']['inflation']
        self.tr = float(params['FMRI']['tr'])
        self.fh = float(params['FMRI']['physio_sampling_rate'])
        self.physio_sampling_rate = int(params['FMRI']['physio_sampling_rate'])
        self.regress_physio = strtobool(params['FMRI']['regress_physio'])
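
strtobool here is the distutils.util function (removed along with distutils in Python 3.12): it accepts "y", "yes", "t", "true", "on", "1" and their negative counterparts case-insensitively, returns 1 or 0 rather than a bool, and raises ValueError on anything else:

from distutils.util import strtobool  # gone in Python 3.12

assert strtobool("yes") == 1
assert strtobool("False") == 0
regress_physio = bool(strtobool("true"))  # cast when a real bool is wanted
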
Example #7
import requests
from django.conf import settings
from requests import RequestException

from custom_auth.oauth.exceptions import GithubError
from custom_auth.oauth.helpers.github_helpers import convert_response_data_to_dictionary, get_first_and_last_name
from util.logging import get_logger

GITHUB_API_URL = "https://api.github.com"
GITHUB_OAUTH_URL = "https://github.com/login/oauth"

NON_200_ERROR_MESSAGE = "Github returned {} status code when getting an access token."

logger = get_logger(__name__)


class GithubUser:
    def __init__(self,
                 code: str,
                 client_id: str = None,
                 client_secret: str = None):
        self.client_id = client_id or settings.GITHUB_CLIENT_ID
        self.client_secret = client_secret or settings.GITHUB_CLIENT_SECRET

        self.access_token = self._get_access_token(code)
        self.headers = {"Authorization": f"token {self.access_token}"}

    def _get_access_token(self, code) -> str:
        data = {
            "code": code,
            "client_id": self.client_id,
            "client_secret": self.client_secret,
        }
        # The listing truncates the method here. A plausible completion,
        # consistent with the imports and constants above (the exact helper
        # signature is an assumption):
        try:
            response = requests.post(f"{GITHUB_OAUTH_URL}/access_token", data=data)
        except RequestException as e:
            raise GithubError(str(e))
        if response.status_code != 200:
            raise GithubError(NON_200_ERROR_MESSAGE.format(response.status_code))
        return convert_response_data_to_dictionary(response.text)["access_token"]
Example #8
def test_get_logger():
    logger = get_logger(__name__)
    assert logger.level == logging.WARNING
Example #9
def run_training():
    """Train mnist for a number of steps."""

    logger = logging.get_logger('evaluationLogger', FLAGS.log_dir,
                                'evaluation', logging.INFO)

    # Get the sets of units and labels for training, validation, and
    # test on mnist.
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir,
                                          FLAGS.fake_data)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the units and labels.
        units_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = lezhin_comics.inference(units_placeholder, FLAGS.hidden_units,
                                         FLAGS.dropout)

        # Add to the Graph the Ops for loss calculation.
        loss = lezhin_comics.loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = lezhin_comics.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = lezhin_comics.evaluation(logits, labels_placeholder)

        # Build the summary Tensor based on the TF collection of Summaries.
        # summary = tf.summary.merge_all()

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        # summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        logger.info(" ".join([
            'step', 'loss_value', 'training_num_examples',
            'training_true_count', 'training_precision',
            'validation_num_examples', 'validation_true_count',
            'validation_precision', 'test_num_examples', 'test_true_count',
            'test_precision'
        ]))
        # Start the training loop.
        pre_training_precision = 0
        for step in range(FLAGS.max_steps):
            # start_time = time.time()

            # Fill a feed dictionary with the actual set of units and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, units_placeholder,
                                       labels_placeholder)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            # duration = time.time() - start_time

            # Save a checkpoint and evaluate the model periodically.
            if step % 10000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                # print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # print('Training Data Eval:')
                training_num_examples, training_true_count, training_precision = do_eval(
                    sess, eval_correct, units_placeholder, labels_placeholder,
                    data_sets.train)
                # Evaluate against the validation set.
                # print('Validation Data Eval:')
                validation_num_examples, validation_true_count, validation_precision = do_eval(
                    sess, eval_correct, units_placeholder, labels_placeholder,
                    data_sets.validation)
                # Evaluate against the test set.
                # print('Test Data Eval:')
                test_num_examples, test_true_count, test_precision = do_eval(
                    sess, eval_correct, units_placeholder, labels_placeholder,
                    data_sets.test)
                logger.info(
                    "{:d} {:f} {:d} {:d} {:f} {:d} {:d} {:f} {:d} {:d} {:f}".
                    format(step, loss_value, training_num_examples,
                           training_true_count, training_precision,
                           validation_num_examples, validation_true_count,
                           validation_precision, test_num_examples,
                           test_true_count, test_precision))
                if pre_training_precision > training_precision:
                    logger.info("training_precision is getting low.")
                    break

                pre_training_precision = training_precision
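
Because the header and the checkpoint rows are both written as whitespace-separated columns, the evaluation log can be read back for analysis. A small sketch assuming the log file contains only those lines (any timestamp prefix added by the handler's formatter would need stripping first):

import pandas as pd

metrics = pd.read_csv("evaluation.log", sep=r"\s+")
print(metrics[["step", "training_precision", "validation_precision"]].tail())
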
Example #10
def run_training(flags,
                 class_number=1,
                 label_managing_function=default_label_managing_function,
                 is_profit=False):
    """Train mnist_example for a number of steps."""

    logger = logging.get_logger('evaluationLogger', flags.log_dir,
                                'evaluation', logging.INFO)

    # Get the sets of units and labels for training, validation, and
    # test on mnist_example.
    # data_sets = read_data(file_name=flags.file_name, company=flags.company, label_name=flags.label_name,
    #                       columns=flags.columns,class_number=class_number, label_profit=label_profit,
    #                       test_rate=flags.test_rate, validation_rate=flags.validation_rate, shuffle=False)
    data_sets = read_data(file_name=flags.file_name,
                          company=flags.company,
                          label_name=flags.label_name,
                          columns=flags.columns,
                          class_number=class_number,
                          label_profit=label_managing_function,
                          test_rate=flags.test_rate,
                          shuffle=False)
    data_sets = to_recurrent_data(data_sets, flags.time_step)
    logger.info("train\ttest")
    logger.info("{:d}\t{:d}".format(len(data_sets.train.units),
                                    len(data_sets.test.units)))
    logger.info("")

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the units and labels.
        units_placeholder, labels_placeholder = placeholder_inputs(
            data_sets.batch_size, flags.time_step, data_sets.column_number)

        # Build a Graph that computes predictions from the inference model.
        logits = inference(units_placeholder, flags.hidden_units,
                           data_sets.column_number, data_sets.class_number,
                           data_sets.batch_size, flags.dropout)

        # Add to the Graph the Ops for loss calculation.
        loss = do_loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = training(loss, flags.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = evaluation(logits, labels_placeholder)

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        sess.run(init)

        logger.info("\t".join(['learning_rate', 'max_steps', 'hidden_units']))
        logger.info("{:f}\t{:d}\t{}".format(flags.learning_rate,
                                            flags.max_steps,
                                            flags.hidden_units))
        logger.info("")
        logger.info(" ".join(
            ['step', 'loss_value', 'training_precision', 'test_precision']))
        # Start the training loop.
        for step in range(flags.max_steps + 1):

            # Fill a feed dictionary with the actual set of units and labels
            # for this particular training step.
            feed_dict, labels_feed, dates_feed = fill_feed_dict(
                data_sets.train, units_placeholder, labels_placeholder, flags)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            # Save a checkpoint and evaluate the model periodically.
            if step % (flags.max_steps / 2) == 0:
                checkpoint_file = os.path.join(flags.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                training_average_squared_error = do_eval(
                    sess, eval_correct, units_placeholder, labels_placeholder,
                    data_sets.train, flags)
                # Evaluate against the test set.
                test_average_squared_error = do_eval(sess, eval_correct,
                                                     units_placeholder,
                                                     labels_placeholder,
                                                     data_sets.test, flags)
                logger.info("{:d}\t{:f}\t{:f}\t{:f}".format(
                    step, loss_value, training_average_squared_error,
                    test_average_squared_error))

        # Compare labels(targets) with predictions.
        feed_dict, labels_feed, dates_feed = fill_feed_dict(
            data_sets.test, units_placeholder, labels_placeholder, flags)
        test_predictions = sess.run(logits, feed_dict)
        average_squared_error = sess.run(
            eval_correct, feed_dict=feed_dict) / data_sets.batch_size
        y_label = 'Result'

        if is_profit:
            labels_feed = np.array(labels_feed) + 1
            labels_feed = labels_feed.cumprod()
            labels_feed = labels_feed - 1
            test_predictions = np.array(test_predictions) + 1
            test_predictions = test_predictions.cumprod()
            test_predictions = test_predictions - 1
            y_label = 'Profit'

        # draw a test graph
        matplotlib.rc('font', family='NanumBarunGothicOTF')
        fig, ax = plt.subplots()
        ax.plot(dates_feed, labels_feed, 'r', label='target')
        ax.plot(dates_feed,
                test_predictions,
                'b',
                label='prediction, {:4.3f}'.format(average_squared_error))
        ax.legend()

        title = flags.file_name
        if flags.company is not None:
            title = title + ", " + flags.company
        plt.title(title)
        plt.xlabel('Time Period')
        plt.ylabel(y_label)
        plt.show()
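
The is_profit branch compounds per-period returns into a cumulative return via (1 + r).cumprod() - 1. A quick numeric check of that identity:

import numpy as np

returns = np.array([0.10, -0.05])          # +10%, then -5%
cumulative = (1 + returns).cumprod() - 1   # 1.10, then 1.10 * 0.95 = 1.045
assert np.allclose(cumulative, [0.10, 0.045])
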
Example #11
def run_training(flags, class_number, label_profit):
    """Train mnist_example for a number of steps."""

    logger = logging.get_logger('evaluationLogger', flags.log_dir,
                                'evaluation', logging.INFO)

    # Get the data_sets of units and labels for training, and test.
    data_sets = read_data(file_name=flags.file_name,
                          company=flags.company,
                          label_name=flags.label_name,
                          columns=flags.columns,
                          class_number=class_number,
                          label_profit=label_profit,
                          test_rate=flags.test_rate,
                          shuffle=False)
    data_sets = to_recurrent_data(data_sets, flags.time_step)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the units and labels.
        units_placeholder, labels_placeholder = placeholder_inputs(
            data_sets.batch_size, flags.time_step, data_sets.column_number)

        # Build a Graph that computes predictions from the inference model.
        logits = inference(units_placeholder, flags.hidden_units,
                           data_sets.column_number, data_sets.class_number,
                           data_sets.batch_size, flags.dropout)

        # Add to the Graph the Ops for loss calculation.
        loss = do_loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = training(loss, flags.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = evaluation(logits, labels_placeholder)

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        sess.run(init)

        logger.info("\t".join(['learning_rate', 'max_steps', 'hidden_units']))
        logger.info("{:f}\t{:d}\t{}".format(flags.learning_rate,
                                            flags.max_steps,
                                            flags.hidden_units))
        logger.info("")
        logger.info(" ".join(
            ['step', 'loss_value', 'training_SSE', 'test_SSE']))
        # Start the training loop.
        for step in range(flags.max_steps + 1):

            # Fill a feed dictionary with the actual set of units and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, units_placeholder,
                                       labels_placeholder, flags)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            # Save a checkpoint and evaluate the model periodically.
            if step % (flags.max_steps / 100) == 0:
                checkpoint_file = os.path.join(flags.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                training_num_examples, training_true_count, training_precision = do_eval(
                    sess, eval_correct, units_placeholder, labels_placeholder,
                    data_sets.train, flags)
                # Evaluate against the test set.
                test_num_examples, test_true_count, test_precision = do_eval(
                    sess, eval_correct, units_placeholder, labels_placeholder,
                    data_sets.test, flags)
                logger.info("{:d}\t{:f}\t{:f}\t{:f}".format(
                    step, loss_value, training_precision, test_precision))
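
One caveat on the checkpoint cadence: under Python 3, flags.max_steps / 100 is a float, so step % (flags.max_steps / 100) == 0 fires at irregular steps whenever max_steps is not a multiple of 100 (e.g. 21 % 10.5 == 0.0 is True). A guard using integer division keeps the cadence integral (save_and_evaluate is a hypothetical stand-in for the checkpoint-and-evaluate block above):

eval_every = max(1, flags.max_steps // 100)  # floor of 1 avoids modulo by zero
if step % eval_every == 0:
    save_and_evaluate()  # hypothetical helper for the block above
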