Example #1
def load_comp_basic(session):
    log_file = (os.getcwd() + os.sep + "logs" + os.sep + "load_comp_basic_%s.log") % (
        dt.datetime.now().strftime('%Y-%m-%d'))
    logger = get_logger(log_file, 'basic_load')

    logger.info('Daily company basic information load begins')
    logger.info("##########################################")

    logger.info("Begin loading data from Tushare")
    # Fetch the basic data of listed companies
    code_list = ts.get_stock_basics()
    code_list = code_list.sort_index()
    logger.info("Got %d stocks in total", len(code_list.index))
    try:
        logger.info("Truncate table stock.comp_basic")
        session.execute("truncate table stock.comp_basic")
        session.commit()

        logger.info("Insert data into table stock.comp_basic")
        code_list.insert(0, 'code', code_list.index)
        code_list.timeToMarket = code_list.timeToMarket.apply(
            lambda x: dt.datetime.strptime(str(x), "%Y%m%d") if x != 0 else None)

        engine = session.get_bind()
        code_list.to_sql('comp_basic', engine, schema='stock', index=False, if_exists='append')

        logger.info("Load finished successfully")

        return code_list
    except Exception as e:
        logger.error("Error while loading basic data: %s", e)
        return pd.DataFrame()
    finally:
        session.close()
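Note: every example on this page assumes a project-specific get_logger helper whose implementation is not shown. As a rough, hypothetical sketch only (the file handler, level, and format string are assumptions, and the real helpers differ between projects), such a function can be built on the standard logging module:

import logging

def get_logger(log_file=None, name=__name__, level=logging.INFO):
    """Hypothetical sketch: return a named logger, optionally writing to log_file."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if log_file and not logger.handlers:
        handler = logging.FileHandler(log_file)
        handler.setFormatter(logging.Formatter(
            "%(asctime)s %(name)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
    return logger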
Example #2
    def __init__(self, path, batch_size, resume):
        """
        Build the AlexNet model
        """
        self.logger = logs.get_logger()

        self.resume = resume
        self.path = path
        self.batch_size = batch_size
        self.lsvrc2010 = LSVRC2010(self.path, batch_size)
        self.num_classes = len(self.lsvrc2010.wnid2label)

        self.lr = 0.001
        self.momentum = 0.9
        self.lambd = tf.constant(0.0005, name='lambda')
        self.input_shape = (None, 227, 227, 3)
        self.output_shape = (None, self.num_classes)

        self.logger.info("Creating placeholders for graph...")
        self.create_tf_placeholders()

        self.logger.info("Creating variables for graph...")
        self.create_tf_variables()

        self.logger.info("Initialize hyper parameters...")
        self.hyper_param = {}
        self.init_hyper_param()
Example #3
def generate_wordcloud_png(session, png_path, default_dict, default_word_font_ttf, stopwords, max_font_size=None, begin_code="000000"):

    log_file = (os.getcwd() + os.sep + "logs" + os.sep + "load_word_cloud_%s.log") % (
        dt.datetime.now().strftime('%Y-%m-%d'))
    logger = get_logger(log_file, 'word_cloud')

    logger.info('Daily word cloud generation begins')
    logger.info("##########################################")

    logger.info("Begin get basic")

    code_basic = get_all_code_basic(session,begin_code=begin_code)

    for code in code_basic.code:
        logger.info("##########################################")
        logger.info("###Begin to generate for {code}###########".format(code = code))
        logger.info("##########################################")
        logger.info("Begin to generate for {code}".format(code = code))
        cloud_df = ts.get_notices(code)
        all = ""
        if isinstance(cloud_df,pd.DataFrame):
            for url in cloud_df.url:
                all += get_url_content(url)
        after = ' '.join(jieba.cut(all,cut_all=False))
        file = get_word_cloud(after,file_name=code+".png",dict=default_dict,max_words=2000,stopwords=stopwords,folder_path=png_path,max_font_size=max_font_size,font_path=default_word_font_ttf)
        logger.info("##########################################")
        logger.info("{file} was generated successfully !".format(file = file))
        logger.info("##########################################")

    logger.info("All word cloud files were generated successfully !")
Example #4
def process_answer(submission: dict):
    logger: Logger = get_logger("process_answer")
    student = submission['xqueue_body']['student_request']
    date = submission['xqueue_header']['submission_time']
    with open('/works/' + student['work'] + '/config.json', 'r') as conf:
        work_script = json.load(conf)
    logger.info("Student submission: %s", submission)
    logger.debug("laboratory work name: %s", student['work'])

    student_dir = '/submissions/' + student['name'] + '_' + student[
        'work'] + '_' + str(date)
    #clone files to tmp dir
    git_url = student['git']
    # keep only last dir (DO IT AFTER EVERYTHING DONE)
    g = gitclone(git_url, student_dir)
    gout, gerr = g.communicate()
    if g.returncode != 0:
        subprocess.call(['rm', '-rf', student_dir])
        return {'status': 'git clone error', 'log': gerr.decode()}
    if not check_file_presence(student_dir, work_script):
        subprocess.call(['rm', '-rf', student_dir])
        return {'error': 'required files are not present'}
    if work_script['type'] == 'multiple':
        files = prepare_exercises_files(student_dir, work_script)
    else:
        files = prepare_files(student_dir, work_script)
    grades = []
    if work_script['language'] == 'c':
        compiler = 'gcc'
    else:
        compiler = 'g++'
    # hoisted: `flags` was previously set only in the single-task branch,
    # leaving it undefined for 'multiple' works
    flags = work_script.get('flags', '')
    if work_script['type'] == 'multiple':
        for exercise in work_script['exercises']:
            tests = prepare_tests(exercise['tests'])
            limits = exercise.get('limits', work_script['default_limits'])
            grade = grade_epicbox(compiler, files[exercise['name']], tests,
                                  flags, limits, work_script['stop_if_fails'],
                                  work_script['memcheck'])
            grades.append({'name': exercise['name'], 'grade': grade})
            logger.info('task: %s\nGrade: %s', exercise['name'], grade)

    else:
        tests = prepare_tests(work_script['tests'])
        limits = work_script['default_limits']
        grade = grade_epicbox(compiler, files, tests, flags, limits,
                              work_script['stop_if_fails'],
                              work_script['memcheck'])
        return grade
    logger.info("task: %s:\nGrade: %s", work_script['name'], grade)
    print('GRADES')
    print(grades)
    return grades
Example #5
    def __init__(self, plot_dir, data_dir, data_ext='.pkl', img_ext='.png'):
        """Initialises `self.params` as an empty `dict`

        Each dictionary key contains the name of the parameter with the
        respective value being the data set as a `list`.

        :type data_ext: str
        :param data_ext: the file extension used for the data

        :type img_ext: str
        :param img_ext: the file extension used for the images
        """

        # Set up logger to track progress
        logs.get_logger(self=self)
        self.logger.info('Started runtime parameter manager ...')

        # set save directories
        self.data_dir = data_dir
        self.plot_dir = plot_dir

        # set up the data structures that hold
        ## the runtime data
        self.params = {}

        ## the image data
        self.imgs = {}

        ## the cost data
        self.cost = {}

        ## the updates data
        self.updates = {}

        self.attrs = [self.params, self.cost, self.imgs, self.updates]
        self.attr_names = ['params', 'cost', 'imgs', 'updates']
        self.attr_dirs = [self.data_dir]*2 + [self.plot_dir, self.data_dir]

        # set the extension types
        self.data_ext = data_ext
        self.imgs_ext = img_ext

        # runtime file attributes
        self.runtime_files = {}
        self.runtime_files_error = False
Example #6
    def run(self, state, current_time_limit, log_file=None):
        logger = get_logger("universal_machine", log_file)

        while True:
            logger.debug(state.to_json())

            if state.instruction_pointer == state.oracle_address:
                state.halt = HaltingCode.CONTINUE
                return

            op_code = state.read(state, state.instruction_pointer)
            if state.halt is not None:
                return

            self._run_operation(state, current_time_limit, op_code)
Example #7
def gen_mean_activity(base_dir):
    """
    Generate mean activity for each channel over entire training set

    :param base_dir: Base directory for training
    """
    logger = get_logger('Mean Activity', 'mean.log')
    RGB = np.zeros((3, ))
    lock = Lock()

    def mean_activity_folder(base_dir):
        _RGB = np.zeros((3, ))
        logger.info("Starting directory: %s", base_dir)
        for image in os.listdir(base_dir):
            img = Image.open(os.path.join(base_dir, image))
            img = resize(img)

            npimg = np.array(img)
            _RGB += npimg.mean(axis=(0, 1))

        with lock:
            nonlocal RGB
            RGB += _RGB

        logger.info("Ending directory: %s", base_dir)

    count = 0
    threads = []
    for i, folder in enumerate(os.listdir(base_dir)):
        folder_path = os.path.join(base_dir, folder)
        count += len(os.listdir(folder_path))
        thread = Thread(target=mean_activity_folder, args=(folder_path, ))
        thread.start()
        threads.append(thread)
        if i % 100 == 0:
            for t in threads:
                t.join()
            threads = []

    for t in threads:
        t.join()

    logger.info("RGB: %s, count: %d", str(RGB), count)
    RGB /= count

    with open('mean.pkl', 'wb') as handle:
        pickle.dump(RGB, handle, protocol=pickle.HIGHEST_PROTOCOL)
Example #8
def start_grader() -> None:
    logger = get_logger("start_grader")

    try:
        while True:
            listen_to_broker(rabbitmq_example)
            sleep(CONNECTION_RETRY_TIME)
    except AttributeError as exception:
        logger.error(exception, exc_info=True)
    except epicbox.exceptions.DockerError as exception:
        logger.error("Docker error: \n%s.", exception)
    except socket.gaierror:
        logger.error("Unknown host name in queue configuration file.")
    except KeyboardInterrupt:
        logger.info("Program has been stopped manually.")
    except Exception as exception:
        logger.error("Unhandled exception: \n%s.", exception, exc_info=True)
Example #9
def generate_wordcloud_png(session,
                           png_path,
                           default_dict,
                           default_word_font_ttf,
                           stopwords,
                           max_font_size=None,
                           begin_code="000000"):

    log_file = (os.getcwd() + os.sep + "logs" + os.sep +
                "load_word_cloud_%s.log") % (
                    dt.datetime.now().strftime('%Y-%m-%d'))
    logger = get_logger(log_file, 'word_cloud')

    logger.info('Daily word cloud generation begins')
    logger.info("##########################################")

    logger.info("Begin get basic")

    code_basic = get_all_code_basic(session, begin_code=begin_code)

    for code in code_basic.code:
        logger.info("##########################################")
        logger.info(
            "### Begin to generate for {code} ###".format(code=code))
        logger.info("##########################################")
        cloud_df = ts.get_notices(code)
        all = ""
        if isinstance(cloud_df, pd.DataFrame):
            for url in cloud_df.url:
                all += get_url_content(url)
        after = ' '.join(jieba.cut(all, cut_all=False))
        file = get_word_cloud(after,
                              file_name=code + ".png",
                              dict=default_dict,
                              max_words=2000,
                              stopwords=stopwords,
                              folder_path=png_path,
                              max_font_size=max_font_size,
                              font_path=default_word_font_ttf)
        logger.info("##########################################")
        logger.info("{file} was generated successfully !".format(file=file))
        logger.info("##########################################")

    logger.info("All word cloud files were generated successfully !")
Example #10
    def test(self):
        step = 10

        self.logger_test = logs.get_logger('AlexNetTest', file_name='logs_test.log')
        self.logger_test.info("In Test: Building the graph...")
        self.build_graph()

        init = tf.global_variables_initializer()

        saver = tf.train.Saver()
        top1_count, top5_count, count = 0, 0, 0
        with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
            self.restore_model(sess, saver)

            start = time.time()
            batch = self.lsvrc2010.gen_batch_test
            for i, (patches, labels) in enumerate(batch):
                count += patches[0].shape[0]
                avg_logits = np.zeros((patches[0].shape[0], self.num_classes))
                for patch in patches:
                    logits = sess.run(self.logits,
                                      feed_dict = {
                                          self.input_image: patch,
                                          self.dropout: 1.0
                                      })
                    avg_logits += logits
                avg_logits /= len(patches)
                top1_count += np.sum(np.argmax(avg_logits, 1) == labels)
                top5_count += np.sum(avg_logits.argsort()[:, -5:] ==
                                     np.repeat(labels, 5).reshape(patches[0].shape[0], 5))

                if i % step == 0:
                    end = time.time()
                    self.logger_test.info("Time: %f Step: %d "
                                          "Avg Accuracy: %f "
                                          "Avg Top 5 Accuracy: %f",
                                          end - start, i,
                                          top1_count / count,
                                          top5_count / count)
                    start = time.time()

            self.logger_test.info("Final - Avg Accuracy: %f "
                                  "Avg Top 5 Accuracy: %f",
                                  top1_count / count,
                                  top5_count / count)
Example #11
    def __init__(self, layer_type):
        """
        Initialize dataset benchmark objects

        Initialize training and test data.
        Create the model and optimizer

        :param layer_type: Type of the layer;
                           Multilayer perceptron or Convolution
                           The value for this can be: mlp or conv
        :type layer_type: :py:obj:`str`
        """
        self.layer_type = layer_type

        # loss
        self.loss = torch.nn.CrossEntropyLoss()

        # dataset initialization
        self.trainset = torchvision.datasets.MNIST(root='./data', train=True,
                                                   download=True)
        self.testset = torchvision.datasets.MNIST(root='./data', train=False,
                                                  download=True)

        # parameters initialization
        self.hparams = init_hyper_params()

        # layer specific initialization
        self.net = None
        self.optimizer = None
        if layer_type == 'mlp':
            self.net = MaxoutMLPMNIST().to(device)
            self.optimizer = torch.optim.SGD(self.net.parameters(),
                                             lr=self.hparams['lr']['mlp'],
                                             momentum=0.9)
        elif layer_type == 'conv':
            self.net = MaxoutConvMNIST().to(device)
            self.optimizer = torch.optim.SGD(self.net.parameters(),
                                             lr=self.hparams['lr']['conv'],
                                             momentum=0.9)

        self.logger = get_logger()
        self.logger.info(device)
        self.LOGGING_MOD = 100
Example #12
def receive_messages(
    host: str, port: int, user: str, password: str, queue: str
) -> None:
    """
    Start consuming messages from RabbitMQ broker.

    :param host: Host of XQueue broker.
    :param port: Port of XQueue broker.
    :param user: Username for basic auth.
    :param password: Password for basic auth.
    :param queue: Queue name.
    """
    logger: Logger = get_logger("rabbitmq")

    connection: BlockingConnection = BlockingConnection(
        ConnectionParameters(
            host=host,
            port=port,
            credentials=credentials.PlainCredentials(user, password),
        )
    )
    ch = connection.channel()

    # Set durable=True to save messages between RabbitMQ restarts
    ch.queue_declare(queue=queue, durable=True)

    # Make RabbitMQ avoid giving more than 1 message at a time to a worker
    ch.basic_qos(prefetch_count=1)

    # Start receiving messages
    ch.basic_consume(queue=queue, on_message_callback=callback_function)

    try:
        logger.info("Started consuming messages from RabbitMQ.")

        ch.start_consuming()
    except KeyboardInterrupt:
        logger.info("Stopped consuming messages from RabbitMQ.")

        ch.stop_consuming()
        connection.close()

        raise
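This consumer is presumably wired up by listen_to_broker (Example #15). A hypothetical invocation, with placeholder connection values that are not from the source:

# Hypothetical usage; host, credentials, and queue name are placeholders.
receive_messages(host="localhost", port=5672, user="guest",
                 password="guest", queue="grader_queue")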
Example #13
def main_levin_search(
    task: Tasks,
    primitives: Primitives,
    work_tape_size: int = 1000,
    program_tape_size: int = 100,
    search_length: int = 8,
    n_weights: int = 100,
    maxint: int = 10000,
    search_log_file: Path = None,
):
    task = Task(task=task)
    universal_machine = UniversalMachine(primitives)

    initial_program_tape = []
    initial_runtime_limit = 2

    base_program = Program(
        program_tape_size=program_tape_size,
        work_tape_size=work_tape_size,
        n_weights=n_weights,
        maxint=maxint,
    )

    program = run_program(initial_program_tape, initial_runtime_limit,
                          universal_machine, base_program)

    logger = get_logger("levin_search", search_log_file)
    logger.debug("Program;Halting Status;Current Runtime Limit;Phase")

    search_state = SearchState(logger)
    for search_state.phase in tqdm(range(1, search_length + 1),
                                   desc=f"Levin search for task {task.task}"):
        levin_search_phase(
            search_state,
            program,
            initial_program_tape,
            universal_machine,
            task,
            base_program,
        )

    return search_state
Example #14
def knn_classifier(input_data, labels, n_neighbors=1, run_for_features=(3, 30, 20)):
    log = logs.get_logger(logger_file=LOGGER_FILE)
    for n in range(1, n_neighbors + 1):
        log.info(f"-Running KNN algorithm with n_neighbors={n}")
        # weight = 'distance'
        # metric = 'manhattan'
        weights = ['distance', 'uniform']
        metrics = ['chebyshev', 'euclidean', 'manhattan']
        for metric in metrics:
            for weight in weights:
                classifier = neighbors.KNeighborsClassifier(n, weights=weight, metric=metric)
                for k in range(*run_for_features):
                    start_time = time.time()
                    log.info(f"--Finding best {k} features")
                    best_features_data = SelectKBest(f_classif, k=k).fit_transform(input_data, labels)
                    X_train, X_test, Y_train, Y_test = train_test_split(best_features_data, labels, test_size=0.3)

                    log.info(f"---Starting fitting for weight={weight} and metric={metric}")
                    classifier.fit(X_train, Y_train)

                    score = classifier.score(X_test, Y_test)

                    """ Confusion matrix """
                    log.info("----Measuring confusion matrix...")
                    result = classifier.predict(X_test)
                    conf_matrix = confusion_matrix(Y_test, result)

                    log.info(f"-----KNN accuracy is: {score * 100}%")

                    # sklearn's confusion_matrix puts true labels on rows and
                    # predictions on columns; treating class 0 as positive:
                    precision = conf_matrix[0][0] / (conf_matrix[0][0] + conf_matrix[1][0]) * 100
                    log.info(f"-----Precision in percent: {precision}%")

                    recall = conf_matrix[0][0] / (conf_matrix[0][0] + conf_matrix[0][1]) * 100
                    log.info(f"-----Recall in percent: {recall}%")

                    false_positives = conf_matrix[1][0] / sum(conf_matrix[1]) * 100
                    log.info(f"-----False positives in percent: {false_positives}%")

                    false_negatives = conf_matrix[0][1] / sum(conf_matrix[0]) * 100
                    log.info(f"-----False negatives in percent: {false_negatives}%")
                    log.info(f"Exec time for {k} features: {time.time() - start_time}s")
Example #15
def listen_to_broker(queue_config: Any):
    """
    Listen to chosen message broker.

    :param queue_config: Module containing queue config.
    """
    logger = get_logger("start_grader")

    if queue_config.TYPE == "rabbitmq":
        try:
            rabbitmq_receive(
                queue_config.HOST,
                queue_config.PORT,
                queue_config.USER,
                queue_config.PASS,
                queue_config.QUEUE,
            )
        except pika.exceptions.AMQPConnectionError as exception:
            logger.error("Failed to connect to RabbitMQ broker. %s", exception)
    else:
        logger.error("Unknown message broker type: %s", queue_config.TYPE)
Example #16
    def __init__(self, model, game, initial_position):
        """ Initializes a new search tree.

        Args:
            model (Model): The model which is used for evaluation.
            game (e.g. game.Game): The game environment which simulates moves and provides game states.
            initial_position (memory.Position): Simulations will start from this position of the game.
        """
        # initialize logger
        self.logger = logs.get_logger()

        self.model = model
        self.game = game
        self.len_action_space = self.game.get_len_action_space()

        # create root node
        self.root = Node(initial_position.state,
                         initial_position.legal_actions)

        # initial evaluation
        self.evaluate(self.root)
Example #17
def load_comp_basic(session):
    log_file = (os.getcwd() + os.sep + "logs" + os.sep +
                "load_comp_basic_%s.log") % (
                    dt.datetime.now().strftime('%Y-%m-%d'))
    logger = get_logger(log_file, 'basic_load')

    logger.info('Daily company basic information load begins')
    logger.info("##########################################")

    logger.info("Begin loading data from Tushare")
    # Fetch the basic data of listed companies
    code_list = ts.get_stock_basics()
    code_list = code_list.sort_index()
    logger.info("Got %d stocks in total", len(code_list.index))
    try:
        logger.info("Truncate table stock.comp_basic")
        session.execute("truncate table stock.comp_basic")
        session.commit()

        logger.info("Insert data into table stock.comp_basic")
        code_list.insert(0, 'code', code_list.index)
        code_list.timeToMarket = code_list.timeToMarket.apply(
            lambda x: dt.datetime.strptime(str(x), "%Y%m%d")
            if x != 0 else None)

        engine = session.get_bind()
        code_list.to_sql('comp_basic',
                         engine,
                         schema='stock',
                         index=False,
                         if_exists='append')

        logger.info("Load successfully")

        return code_list
    except Exception as e:
        logger.error("Error while loading basic data: %s", e)
        return pd.DataFrame()
    finally:
        session.close()
Example #18
def malware_data_transform(optimize_data=None, csv_data='MalwareData.csv', csv_sep='|', enable_figures=False):
    log = logs.get_logger(logger_file=LOGGER_FILE)
    log.info("-Reading csv file")
    full_data = pd.read_csv(csv_data, sep=csv_sep)

    pd.set_option("display.max_columns", None)

    labels = full_data['legitimate'].values
    full_data: pd.DataFrame = full_data.drop(['Name', 'md5', 'legitimate'], axis=1)

    if optimize_data == 'normalize':
        log.info("--Data normalization processing...")
        full_data = pd.DataFrame(preprocessing.MinMaxScaler().fit_transform(full_data))
    elif optimize_data == 'standardize':
        log.info("--Data standardization processing...")
        full_data = pd.DataFrame(preprocessing.StandardScaler().fit_transform(full_data))

    if enable_figures:
        log.info("--Figures enabled")
        plot_bar_figures(full_data, optimize_data)

    return full_data.values, labels
Example #19
def knn_classifier_final_model(input_data, labels):
    log = logs.get_logger(logger_file=LOGGER_FILE)
    log.info(f"-Running final KNN algorithm model with n_neighbors=8")
    weight = 'distance'
    metric = 'manhattan'

    classifier = neighbors.KNeighborsClassifier(8, weights=weight, metric=metric)
    start_time = time.time()
    log.info(f"--Finding best 30 features")
    best_features_data = SelectKBest(f_classif, k=30).fit_transform(input_data, labels)
    X_train, X_test, Y_train, Y_test = train_test_split(best_features_data, labels, test_size=0.3)

    log.info(f"---Starting fitting for weight={weight} and metric={metric}")
    classifier.fit(X_train, Y_train)

    score = classifier.score(X_test, Y_test)

    """ Confusion matrix """
    log.info("----Measuring confusion matrix...")
    result = classifier.predict(X_test)
    conf_matrix = confusion_matrix(Y_test, result)

    log.info(f"-----KNN accuracy is: {score * 100}%")

    # sklearn's confusion_matrix puts true labels on rows and predictions on
    # columns; treating class 0 as positive:
    precision = conf_matrix[0][0] / (conf_matrix[0][0] + conf_matrix[1][0]) * 100
    log.info(f"-----Precision in percent: {precision}%")

    recall = conf_matrix[0][0] / (conf_matrix[0][0] + conf_matrix[0][1]) * 100
    log.info(f"-----Recall in percent: {recall}%")

    false_positives = conf_matrix[1][0] / sum(conf_matrix[1]) * 100
    log.info(f"-----False positives in percent: {false_positives}%")

    false_negatives = conf_matrix[0][1] / sum(conf_matrix[0]) * 100
    log.info(f"-----False negatives in percent: {false_negatives}%")
    log.info(f"Exec time for 30 features: {time.time() - start_time}s")
Example #20
    def __init__(self, path):
        """
        Build the VGG model
        """
        self.vgg_conf = read_vgg_conf()
        width, height = self.vgg_conf['input_size']
        self.input_images = tf.placeholder(tf.float32,
                                           shape=[None, width, height, 3],
                                           name='input_image')
        self.output_labels = tf.placeholder(
            tf.float32,
            shape=[None, self.vgg_conf['FC19']],
            name='output_labels')
        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        self.dropout = tf.placeholder(tf.float32, name='dropout')

        self.global_step = tf.Variable(tf.constant(0))

        self.path = path
        self.model_path = os.path.join(os.getcwd(), 'model', 'model.ckpt')
        if not os.path.exists(os.path.join(os.getcwd(), 'model')):
            os.mkdir(os.path.join(os.getcwd(), 'model'))

        self.logger = get_logger()
Example #21
def load_daily_data(session, begin_code="000000"):
    engine = session.get_bind()

    log_file = (os.getcwd() + os.sep + "logs" + os.sep + "load_daily_%s.log") % (dt.datetime.now().strftime('%Y-%m-%d'))
    logger = get_logger(log_file,'daily_load')

    logger.info('Daily load begin')
    logger.info("##########################################")

    # Fetch the basic data of listed companies
    code_list = get_all_code_basic(session, begin_code)

    # Append the Shanghai and Shenzhen index codes
    s = pd.DataFrame(['sh', 'sz'], columns=["code"])
    code_list = code_list.append(s, ignore_index=True)

    # Get the max loaded date for every stock in the database
    max_date_sql = "select code, max(date) as max_date from stock.stock_hist group by code"
    max_date_df = pd.read_sql(max_date_sql, engine, columns=['code', 'max_date'])

    # Load the history of every stock
    for index in code_list.index:
        row = code_list.iloc[index]
        logger.info(("Load data of code : %s ") % (row.code))
        logger.info(("Time to market : {timeToMarket} ").format(timeToMarket=row.timeToMarket))

        if (not row.timeToMarket) and (row.code not in ('sh', 'sz')):
            logger.info("Code : %s is not yet listed", row.code)
            continue

        try:
            max_date = max_date_df[max_date_df.code == row.code].max_date.values[0]
            if not isinstance(max_date, (dt.date, dt.datetime)):
                max_date = dt.datetime.strptime(max_date, "%Y-%m-%d")
        except Exception:
            logger.info("Code : %s has no history data", row.code)
            max_date = dt.date(1900, 1, 1)

        next_date_py = max_date + dt.timedelta(days=1)
        next_date_str = next_date_py.strftime('%Y-%m-%d')

        # Fetch the data from Tushare
        one_stock = ts.get_hist_data(row.code, start=next_date_str)
        logger.info("Loading rows starting from date : %s", next_date_str)

        try:
            if row.code in ('sh', 'sz'):
                one_stock['turnover'] = 0
            one_stock['date'] = one_stock.index
            logger.info("Code : %s has %d rows to be inserted", row.code, one_stock.index.size)
        except Exception:
            logger.error("Code : %s has something wrong from date %s", row.code, next_date_str)
            break

        one_stock = one_stock.reset_index(drop=True)
        one_stock.insert(0, 'code', row.code, allow_duplicates=True)

        try:
            one_stock.to_sql('stock_hist', engine, index=False, schema='stock', if_exists='append')
            logger.info("Code : %s updated successfully", row.code)
            logger.info("##########################################")
        except Exception:
            logger.error("Code : %s has something wrong for inserting %s", row.code, next_date_str)
            break

    # close the session once after the loop (the original closed it in a
    # finally block on every iteration)
    session.close()
Example #22
parser.add_option('-z', '--decay', action="store", dest="decay", help="Weight Decay", default="0.0")
parser.add_option('-m', '--lrgamma', action="store", dest="lrgamma", help="Scheduler Learning Rate Gamma", default="1.0")
parser.add_option('-k', '--ttahflip', action="store", dest="ttahflip", help="Bag with horizontal flip on and off", default="F")
parser.add_option('-q', '--ttatranspose', action="store", dest="ttatranspose", help="Bag with transpose on and off", default="F")
parser.add_option('-x', '--datapath', action="store", dest="datapath", help="Data path", default="data")


options, args = parser.parse_args()
package_dir = options.rootpath
sys.path.append(package_dir)
sys.path.insert(0, 'scripts')
from logs import get_logger
from utils import dumpobj, loadobj, GradualWarmupScheduler

# Print info about environments
logger = get_logger(options.logmsg, 'INFO') # noqa
logger.info('Cuda set up : time {}'.format(datetime.datetime.now().time()))

device = torch.device('cuda')
logger.info('Device : {}'.format(torch.cuda.get_device_name(0)))
logger.info('Cuda available : {}'.format(torch.cuda.is_available()))
n_gpu = torch.cuda.device_count()
logger.info('Cuda n_gpus : {}'.format(n_gpu))


logger.info('Load params : time {}'.format(datetime.datetime.now().time()))
for (k,v) in options.__dict__.items():
    logger.info('{}{}'.format(k.ljust(20), v))

SEED = int(options.seed)
SIZE = int(options.size)
Example #23
import json
import datetime

from logs import get_logger
from asynchronous import common_task
from common.utils import user_to_device
from common.mysql_pool import ConnMysql
from rabbitmq.RabbitMQ import new_routing, get_retry_count, fail_task_handle, retry_task_handle, DELAY_MAP

logger = get_logger('wechat_mp')


def call_back(channel, method, properties, body):
    # print(f'>>> executing callback: channel {channel.__dict__}, \n'
    #       f'method {method.__dict__}, \n'
    #       f'properties {properties.__dict__}, \n'
    #       f'body {body}\n')

    message = body.decode().split('.')
    count = get_retry_count(properties)
    try:
        res = 1 / int(message[0])
        logger.info(f'Task processed successfully >>> {res}'.center(100, '*'))

    except ZeroDivisionError:
        if count < 3:
            retry_task_handle(mq, channel, method, properties, body)
            logger.info(
                f'Task failed; message sent to the delay queue, '
                f'retry #{count + 1} will run in {DELAY_MAP[get_retry_count(properties)] / 1000} '
                f'seconds >>> {datetime.datetime.now()}'.center(100, '*'))
Example #24
class SupplementaryView(MethodView):
    decorators = [login_required, auth.permissions_author]

    def get(self):
        return render_template("tasks/elements/supplementary.html")


class TextContentView(MethodView):
    decorators = [login_required, auth.permissions_author]

    def get(self):
        return render_template("tasks/elements/textContent.html")


class ProblemStatementView(MethodView):
    decorators = [login_required, auth.permissions_author]

    def get(self):
        return render_template("tasks/elements/problemStatement.html")


class CoursesTeachingView(MethodView):
    def get(self):
        courses = current_user.get_courses_where_teacher_or_ta()
        courses = [c.serialize for c in courses]
        return flask.json.dumps(courses)


logger = logs.get_logger()
Example #25
import logs as logging
logger = logging.get_logger(__name__)


class LogExceptionsMiddleware(object):
    """Logging all exceptions coming from views.
    
    This middleware should come before all middlewares handling views errors and returning a response
    as those errors should be logged explicitly there. 
    """
    
    def process_exception(self, request, exception):
        logger.exception(exception)
        return None
Example #26
def grade_epicbox(
    compiler: str,
    prepared_files: dict,
    tests: list,
    flags: str,
    docker_limits: dict,
    stop_if_fails: bool,
    memcheck: bool,
) -> list:
    """
    Running grading script in a separate Docker container.
    https://github.com/StepicOrg/epicbox

    :param submission: Student submission received from message broker.
    :param script_name: Name of the grading script.
    :param prepared_files: List of files and their paths.
    :param docker_profile: Epicbox profile.
    :param docker_limits: Docker container limits.
    :return: Results of grading.
    """
    logger: Logger = get_logger("process_answer")
    PROFILES = {
        'test_code': {
            'docker_image': 'test',
            'user': '******',
            'read_only': False,
            'network_disabled': True,
        }
    }
    epicbox.configure(profiles=PROFILES)
    with epicbox.working_directory() as wd:

        comp = compile_code(compiler, flags, prepared_files, wd)
        if comp['exit_code'] != 0:
            return [{'status': 'CE', 'log': comp['stderr'].decode()}]
        test_results = []
        for test in tests:
            test_case = epicbox.run('test_code',
                                    './main',
                                    stdin=test['input'],
                                    limits=docker_limits,
                                    workdir=wd)
            test_case['stdout'] = test_case['stdout'].decode()
            test_case['stderr'] = test_case['stderr'].decode()
            base_result = {
                'test_name': test['test_name'],
                'input': test['input'],
                'correct_output': test['output'],
                'answer': test_case,
            }
            if test_case['timeout']:
                result = {'status': 'TL', **base_result}
            elif test_case['oom_killed']:
                result = {'status': 'ML', **base_result}
            elif test_case['stdout'] != test['output']:
                result = {'status': 'WA', **base_result}
            else:
                result = {'status': 'OK', **base_result}
            if stop_if_fails and result['status'] != 'OK':
                test_results.append(result)
                break
            if memcheck and result['status'] == 'OK':
                # `test_input` and `limits` were undefined in the original;
                # use the current test's input and the passed docker_limits
                memory_check = check_valgrind(test['input'], docker_limits, wd)
                if memory_check['exitcode'] != 0:
                    result['memcheck'] = {
                        'memcheck': 'ERROR',
                        'log': memory_check['stderr'].decode()
                    }
                else:
                    result['memcheck'] = {'memcheck': 'OK'}
            test_results.append(result)
    logger.debug("Results: %s", test_results)

    return test_results
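The compile_code helper called above is not shown on this page. A minimal sketch of what it could look like, under the assumptions that prepared_files is a list of {'name': ..., 'content': ...} mappings (epicbox's file format) and that compilation runs in the same 'test_code' profile:

def compile_code(compiler, flags, prepared_files, workdir):
    # Hypothetical sketch, not the project's actual helper: compile the
    # submitted sources to ./main inside the epicbox sandbox.
    sources = ' '.join(f['name'] for f in prepared_files)
    command = '{} {} -o main {}'.format(compiler, flags, sources)
    return epicbox.run('test_code', command, files=prepared_files,
                       workdir=workdir)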
Example #27
class SupplementaryView(MethodView):
    decorators = [login_required, auth.permissions_author]

    def get(self):
        return render_template("tasks/elements/supplementary.html")


class TextContentView(MethodView):
    decorators = [login_required, auth.permissions_author]

    def get(self):
        return render_template("tasks/elements/textContent.html")


class ProblemStatementView(MethodView):
    decorators = [login_required, auth.permissions_author]

    def get(self):
        return render_template("tasks/elements/problemStatement.html")


class CoursesTeachingView(MethodView):

    def get(self):
        courses = current_user.get_courses_where_teacher_or_ta()
        courses = [c.serialize for c in courses]
        return flask.json.dumps(courses)

logger = logs.get_logger()
Example #28
from io import StringIO
import pandas as pd
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
from db import DB_model
from logs import get_logger
import time

logger = get_logger(__name__)


def get_data(raw_csv, target):
    try:
        data = pd.read_csv(StringIO(str(raw_csv)))
        features = [col for col in data.columns if col != target]
        X = data[features]
        y = data[target]
    except Exception as exc:
        logger.debug(f"something went wrong: {exc}")
        raise ValueError(f"something went wrong: {exc}")
    return X, y, features


def train(X, y, l2_coef, n_folds, fit_intercept=True):
    try:
        l2_coef_score = {}
        for coef in l2_coef:
            reg = linear_model.Ridge(alpha=coef, fit_intercept=fit_intercept)
Example #29
import socket
import logs
import peticion
import respuesta

TAMANO_BUFFER = 10240
HOST = '127.0.0.1'
PUERTO = 8888

log = logs.get_logger('Cliente')


class Cliente:
    def enviar_peticion(self, datos):
        if not isinstance(datos, peticion.Peticion):
            raise Exception('A Peticion instance was expected')
        datos = datos.a_json()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((HOST, PUERTO))
        log.info('Connected to the server')
        sock.sendall(bytes(datos + '\n', 'utf-8'))
        recv = str(sock.recv(TAMANO_BUFFER), 'utf-8')
        log.info('Response from the server: %s', recv)
        sock.close()
        datos = respuesta.Respuesta()
        datos.desde_json(recv)
        return datos
Example #30
import json
import uuid
from flask import request, jsonify

from logs import get_logger
from apis.vsubscribe import api_vsubscribe
from rabbitmq.RabbitMQ import new_routing
from apps.subscribe.models import SessionManager, UserInfo, Admin
from common import wx_http, face_detect
from asynchronous import common_task
from common.utils import object_to_dict, user_to_device

session_manager = SessionManager()
logger = get_logger('subscribe')


@api_vsubscribe.route('/saveUserinfo', methods=['POST'])
def subscribe_save_user():
    code = 200
    data = {}
    message = 'success'

    register = False
    user_info = request.json['userInfo']

    with session_manager.session_execute() as session:
        user = session.query(UserInfo).filter(
            UserInfo.openId == user_info['openId']).first()
        if not user:
            user = UserInfo(nick_name=user_info.get('nickName'),
                            gender=user_info.get('gender'),
Example #31
                  default="F")

options, args = parser.parse_args()
package_dir = options.rootpath
sys.path.append(package_dir)
from logs import get_logger
from utils import dumpobj, loadobj, GradualWarmupScheduler

# Print info about environments
logger = get_logger(options.logmsg, 'INFO')  # noqa
logger.info('Cuda set up : time {}'.format(datetime.datetime.now().time()))

device = torch.device('cuda')
logger.info('Device : {}'.format(torch.cuda.get_device_name(0)))
logger.info('Cuda available : {}'.format(torch.cuda.is_available()))
n_gpu = torch.cuda.device_count()
logger.info('Cuda n_gpus : {}'.format(n_gpu))

logger.info('Load params : time {}'.format(datetime.datetime.now().time()))
for (k, v) in options.__dict__.items():
    logger.info('{}{}'.format(k.ljust(20), v))

SEED = int(options.seed)
SIZE = int(options.size)
EPOCHS = int(options.epochs)
Example #32
def test_logging():
    logger = logs.get_logger()
    logger.debug('pytest log')
Example #33
```bash
sudo apt update
sudo apt install python3-gpiozero -y
sudo apt install python-gpiozero -y
```

"""
import os
from time import sleep
import argparse
from logs import get_logger

from gpiozero import LED

if __name__ == "__main__":
    logger = get_logger()
    logger.info("Beginning garage door app.")
    parser = argparse.ArgumentParser(description="Open/close the garage door.")
    parser.add_argument('--pin',
                        default=os.getenv('GARAGE_DOOR_PIN', 14),
                        required=False,
                        type=int,
                        help="The GPIO pin for the garage door on the Pi.")
    args = parser.parse_args()
    logger.info(
        "Connecting garage door opener to pin ({pin})".format(pin=args.pin))
    try:
        garage_door = LED(args.pin)
        logger.info("Triggering garage door.")
        garage_door.toggle()
        sleep(0.5)
Example #34
"""
Dedicated module for global parameters of the project
"""


import logs


LOG = logs.create_logger()
TERMINAL = logs.get_logger("sumo_log").terminal
EXCEPTION = logs.get_logger("sumo_log").exception
EXIT = False

VEHICLES = []
ID_TO_VEHICLE = {}
PLATOON = None
DEFAULT_MAX_SPEED = None
DEFAULT_SPEED = None


def toggle_exit():
    """
    Negates the current value of the EXIT global
    """
    
    global EXIT
    
    EXIT = not EXIT


def set_platoon(platoon):
Example #35
"""
WMT14 en -> de
"""

from .util import return_index
import h5py
import numpy as np

import logs

logger = logs.get_logger(__name__)


class WMT14:
    def __init__(self, split, basepath, vocab, in_memory=False):
        self.basepath = basepath
        self.vocab = vocab

        self.src_file = h5py.File(f"{self.basepath}.{split}.en.hdf5", "r")
        self.tgt_file = h5py.File(f"{self.basepath}.{split}.de.hdf5", "r")

        self.src_lang = self.src_file["lang"]
        self.src_lengths = self.src_file["lengths"]
        self.tgt_lang = self.tgt_file["lang"]
        self.tgt_lengths = self.tgt_file["lengths"]
        if in_memory:
            logger.info("Reading into memory (may take a while)")
            self.src_lang = self.src_lang[:]
            self.src_lengths = self.src_lengths[:]
            self.tgt_lang = self.tgt_lang[:]
            self.tgt_lengths = self.tgt_lengths[:]