예제 #1
0
파일: main.py 프로젝트: indietyp/IoP
 def verify(self):
     """Return True when a Plant row flagged as localhost exists, else False."""
     try:
         # Raises if no Plant record is marked as the local host.
         Plant.get(Plant.localhost == True)
     except Exception as exc:
         logger.warning(exc)
         return False
     return True
def pull_feeds(url, provider, rss_category):
    """
    Pull feeds for an individual category specific Article
    :param url: Category based Provider URL link
    :param provider: Name of the Article Provider
    :param rss_category: Category of the articles
    :return: None
    """
    try:
        # Get provider from the config file
        items = parse_url(url)

        if items:
            # Loop through the items iterator object
            for item in items:
                # Skip the article when either the title or the description
                # is missing.  (Bug fix: the original `not item['title'] and
                # item['description']` only skipped when the title was falsy
                # AND the description was truthy, contradicting the intent.)
                if not (item['title'] and item['description']):
                    continue
                # Update Metadata
                result = update_metadata(item, rss_category, provider)

                # Update Mongo DB
                update_mongo(result, provider)
        else:
            logger.warning(
                "Unable to pull items for `{}` in  `{}` category".format(
                    provider, rss_category))

    except Exception as error:
        logger.error(error)
예제 #3
0
파일: strategy.py 프로젝트: gin66/onvista
    def dogs_result_df(self):
        """Return the dogs_df result, preparing the strategy on first access."""
        if hasattr(self, "dogs_df"):
            return self.dogs_df

        # First access: run the strategy; it reports symbols that needed
        # fallback values.
        res = self._prepare_strategy()
        if res:
            logger.warning(
                f"Dividend Yield and PE_Ratio for {res} not found. Used fallback instead. It "
                f"is advised to check actual values online.")
        return self.dogs_df
예제 #4
0
  def alive(self):
    """Poll every known plant over the mesh network and record uptime.

    Collects remote active masters plus the slaves assigned to the local
    plant, pings each one, and updates its online/offline
    PlantNetworkUptime counters.  Non-master results are additionally
    pushed to every master via an HTTP callback.
    """
    from models.plant import Plant
    from models.plant import PlantNetworkStatus
    from models.plant import PlantNetworkUptime

    daemon = MeshNetwork()
    # Status rows used as the foreign key of the uptime records.
    online = PlantNetworkStatus.select().where(PlantNetworkStatus.name == 'online')
    offline = PlantNetworkStatus.select().where(PlantNetworkStatus.name == 'offline')

    # Remote, active masters (this host excluded).
    masters = Plant.select().where(Plant.localhost == False, Plant.role == 'master', Plant.active == True)
    masters = list(masters)
    plants = deepcopy(masters)

    # NOTE(review): kwargs form differs from the `Plant.localhost == True`
    # expression style used elsewhere in this file — confirm this peewee
    # version accepts it.
    local = Plant.get(localhost=True)
    # Slaves store the UUID of their master in `role`.
    slaves = Plant.select().where(Plant.localhost == False, Plant.role == str(local.uuid))
    plants.extend(list(slaves))

    for plant in plants:
      daemon.alive(plant, 1)
      # presumably a blocking read of the ping result with a 5s timeout — TODO confirm
      status = self.get(5)

      online_dataset = None
      offline_dataset = None

      # Each entry: [status row, uptime record slot, status codes that match].
      options = [[online, online_dataset, [1]],
                 [offline, offline_dataset, [254, 255]]]

      for i in options:
        # NOTE(review): redundant — i[1] is immediately reassigned below.
        i[1] = None
        i[1], _ = PlantNetworkUptime.get_or_create(plant=plant,
                                                   status=i[0],
                                                   defaults={'overall': 0, 'current': 0})

        if status in i[2]:
          i[1].current += 1
          i[1].overall += 1
          i[1].save()

          if plant.role != 'master':
            # Empty POST body; the update is encoded entirely in the URL.
            data = urllib.parse.urlencode({}).encode('ascii')

            for master in masters:
              req = urllib.request.Request('http://{}:2902/update/plant/{}/alive/{}/add'.format(master.ip, str(plant.uuid), i[1].status.name), data)
              try:
                with urllib.request.urlopen(req) as response:
                  response.read().decode('utf8')
              except Exception as e:
                logger.warning('{} - {}: {}'.format(plant.name, master.name, e))

        # NOTE(review): for every OTHER option this zeroes the CURRENT
        # record's streak (i[1].current) — including one incremented just
        # above.  Looks like it may have been meant to reset dataset[1]
        # instead; verify intent before touching.
        for dataset in options:
          if dataset[0] != i[0] and i[1].current != 0:
            i[1].current = 0
            i[1].save()
예제 #5
0
def configORDERERS(name, path, offset, yamlContent):  # name means ordererid
    """Render the Kubernetes deployment YAML for one orderer node.

    :param name: orderer id, e.g. ``orderer0.example.com`` (host + domain)
    :param path: project path used to locate the deployment-file directory
    :param offset: per-orderer index used to compute the exposed node port
    :param yamlContent: parsed cluster configuration (cluster-config.yaml)
    """
    # TODO put these methods to a utils.
    currentDir = os.path.dirname(__file__)

    clusterName = yamlContent["clusterName"]
    PORTSTARTFROM = yamlContent["fabricPortStartFrom"]
    fabricTag = yamlContent["hyperledgerFabricImage"]["fabricTag"]
    fabricKafkaTag = yamlContent["hyperledgerFabricImage"]["fabricKafkaTag"]
    consensusType = yamlContent["consensusType"]
    if consensusType == "kafka":
        ordererTemplate = "fabric_1_0_template_orderer_kafka.yaml"
    elif consensusType == "solo":
        ordererTemplate = "fabric_1_0_template_orderer.yaml"
    else:
        ordererTemplate = "fabric_1_0_template_orderer.yaml"
        # Bug fix: the original referenced the undefined name
        # `ordererYaml["Type"]` here, raising NameError instead of warning.
        logger.warning("WARNING: Unknown orderer type %s. Use solo instead.",
                       consensusType)

    configTemplate = getTemplate(ordererTemplate)
    storedPath = locateDeploymentFile(path)
    deploymentOfOrderer = storedPath + "/" + name + ".yaml"

    mspPathTemplate = 'orderers/{}/msp'
    tlsPathTemplate = 'orderers/{}/tls'
    proPathTemplate = 'orderers/{}/production'

    # Split "orderer0.example.com" into host ("orderer0") and domain.
    nameSplit = name.split(".", 1)
    ordererName = nameSplit[0]
    domainName = nameSplit[1]
    ordererOrgs = yamlContent["crypto-config.yaml"]["OrdererOrgs"]
    # NOTE(review): if no org domain matches, `namespace` stays unbound and
    # the render() call below raises NameError — confirm config guarantees
    # a match.
    for ordererOrg in ordererOrgs:
        if domainName == ordererOrg["Domain"]:
            namespace = ordererOrg["Name"].lower() + "-" + clusterName.lower()

    ordererOffset = offset
    exposedPort = PORTSTARTFROM + 2000 + ordererOffset

    notifyDFgeneration(deploymentOfOrderer, configTemplate)
    render(configTemplate,
           deploymentOfOrderer,
           namespace=namespace,
           ordererID=ordererName,
           podName=ordererName + "-" + namespace,
           localMSPID="OrdererMSP",
           mspPath=mspPathTemplate.format(name),
           tlsPath=tlsPathTemplate.format(name),
           proPath=proPathTemplate.format(name),
           nodePort=exposedPort,
           pvcName=namespace + "-pvc",
           fabricTag=fabricTag,
           fabricKafkaTag=fabricKafkaTag)
def fetch():
    """Walk every configured RSS client and pull each category feed."""
    clients = client_info.CLIENTS
    if not clients:
        logger.warning(
            "RSS client info is empty. Check the client_info.py file")
        sys.exit(1)

    try:
        # Each client maps category names to endpoint suffixes appended to
        # its base URL.
        for client, info in clients.items():
            for category, endpoint in info["endpoints"].items():
                pull_feeds(info["base_url"] + endpoint, client, category)

    except Exception as error:
        logger.error(error)
예제 #7
0
    def _ProcessFileOrDirectory(self, path_spec, parent_id):
        """Recursively record a file entry and, for directories, its children.

        :param path_spec: dfVFS path specification of the entry to process
        :param parent_id: TSK inode address of the parent directory
        """
        current_display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
            path_spec)

        file_entry = dfvfs_resolver.Resolver.OpenFileEntry(path_spec)

        # Bug fix: the None check must run BEFORE dereferencing
        # file_entry._tsk_file; the original accessed it first, so a failed
        # open raised AttributeError and the guard was dead code.
        if file_entry is None:
            logger.warning(
                'Unable to open file entry with path spec: {0:s}'.format(
                    current_display_name))
            return

        # TSK inode address, passed to children as their parent id.
        current_id = file_entry._tsk_file.info.meta.addr

        if file_entry.IsDirectory():

            for sub_file_entry in file_entry.sub_file_entries:
                try:
                    # Skip unallocated (deleted) entries.
                    if not sub_file_entry.IsAllocated():
                        continue

                except dfvfs_errors.BackEndError as exception:
                    logger.warning(
                        'Unable to process file: {0:s} with error: {1!s}'.
                        format(
                            sub_file_entry.path_spec.comparable.replace(
                                '\n', ';'), exception))
                    continue

                # The virtual $OrphanFiles directory at the root is skipped.
                if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
                    if file_entry.IsRoot(
                    ) and sub_file_entry.name == '$OrphanFiles':
                        continue

                self._ProcessFileOrDirectory(sub_file_entry.path_spec,
                                             current_id)

        self._InsertFileInfo(file_entry, parent_id=parent_id)
        # Release the reference so the resolver can free the entry.
        file_entry = None
예제 #8
0
def main():
    """Parse CLI arguments, then fine-tune / evaluate / test an ALBERT
    sequence-classification model on the configured task."""
    parser = argparse.ArgumentParser()

    parser.add_argument("--arch", default='albert', type=str)
    parser.add_argument('--task_name', default='lcqmc', type=str)
    parser.add_argument("--train_max_seq_len", default=60, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--eval_max_seq_len", default=60, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Rul evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--eval_batch_size", default=16, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=2e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.1, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    # Bug fix: was `type=int` with a float default, so any user-supplied
    # value was truncated/rejected; also the bare `%` in the help string
    # crashed argparse's %-interpolation when rendering --help.
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10%% of training.")

    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()

    args.model_save_path = config['checkpoint_dir'] / f'{args.arch}'
    args.model_save_path.mkdir(exist_ok=True)

    # Set up distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1

    args.device = device
    init_logger(log_file=config['log_dir'] / 'finetuning.log')
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    seed_everything(args.seed)
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_dir'] / 'vocab.txt', do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    num_labels = len(label_list)

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    bert_config = BertConfig.from_json_file(str(config['bert_dir'] / 'bert_config.json'))

    bert_config.share_parameter_across_layers = True
    bert_config.num_labels = num_labels

    logger.info("Training/evaluation parameters %s", args)
    metrics = Accuracy(topK=1)
    # Training
    if args.do_train:
        train_data = processor.get_train(config['data_dir'] / "train.txt")
        train_examples = processor.create_examples(lines=train_data, example_type='train',
                                                   cached_examples_file=config[
                                                                            'data_dir'] / f"cached_train_examples_{args.arch}")
        train_features = processor.create_features(examples=train_examples, max_seq_len=args.train_max_seq_len,
                                                   cached_features_file=config[
                                                                            'data_dir'] / "cached_train_features_{}_{}".format(
                                                       args.train_max_seq_len, args.arch
                                                   ))
        train_dataset = processor.create_dataset(train_features)
        train_sampler = RandomSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

        valid_data = processor.get_dev(config['data_dir'] / "dev.txt")
        valid_examples = processor.create_examples(lines=valid_data, example_type='valid',
                                                   cached_examples_file=config[
                                                                            'data_dir'] / f"cached_valid_examples_{args.arch}")
        valid_features = processor.create_features(examples=valid_examples, max_seq_len=args.eval_max_seq_len,
                                                   cached_features_file=config[
                                                                            'data_dir'] / "cached_valid_features_{}_{}".format(
                                                       args.eval_max_seq_len, args.arch
                                                   ))
        valid_dataset = processor.create_dataset(valid_features)
        valid_sampler = SequentialSampler(valid_dataset)
        valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size)

        model = BertForSequenceClassification.from_pretrained(config['bert_dir'], config=bert_config)
        if args.local_rank == 0:
            torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
        model.to(args.device)
        train(args, train_dataloader, valid_dataloader, metrics, model)
    if args.do_test:
        # NOTE(review): uses get_train to read test.txt — confirm the
        # processor has no dedicated get_test method before changing.
        test_data = processor.get_train(config['data_dir'] / "test.txt")
        test_examples = processor.create_examples(lines=test_data,
                                                  example_type='test',
                                                  cached_examples_file=config[
                                                  'data_dir'] / f"cached_test_examples_{args.arch}")
        test_features = processor.create_features(examples=test_examples,
                                                  max_seq_len=args.eval_max_seq_len,
                                                  cached_features_file=config[
                                                  'data_dir'] / "cached_test_features_{}_{}".format(
                                                      args.eval_max_seq_len, args.arch
                                                  ))
        test_dataset = processor.create_dataset(test_features)
        test_sampler = SequentialSampler(test_dataset)
        test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size)
        model = BertForSequenceClassification.from_pretrained(args.model_save_path, config=bert_config)
        model.to(args.device)
        test_log = evaluate(args, model, test_dataloader, metrics)
        print(test_log)
예제 #9
0
            raise SDKException(
                "The SDK expected sysroot doesn't exist: {}".format(sysroot))
        logger.info("SDK installed successfully")


def get_sdk_sysroot(sdk, sdk_name):
    """Resolve the sysroot directory configured for an SDK.

    Raises Exception when the SDK record is missing or lacks a "sysroot"
    entry.
    """
    if not sdk:
        raise Exception("SDK {} not found".format(sdk_name))
    if "sysroot" not in sdk:
        raise Exception(
            "SDK {} doesn't have a sysroot defined".format(sdk_name))
    sdk_dir = get_sdk_dir(sdk_name)
    return os.path.join(sdk_dir, sdk["sysroot"])


def set_default_sdk(sdk_name):
    """Resolve an SDK alias and verify that the SDK is installed.

    Raises SDKNotInstalled when the resolved SDK has not been installed.
    """
    resolved = unalias_name(sdk_name)
    # get_sdk may validate/raise for unknown names; keep the lookup even
    # though its result is unused here.
    sdk = get_sdk(resolved)
    if not is_sdk_installed(resolved):
        raise SDKNotInstalled("The SDK {} is not installed".format(resolved))


# Resolve the pinned SDK record and its sysroot path at import time so
# importing modules can use them as constants.
WASI_SDK = get_sdk(CURRENT_SDK)
WASI_SDK_DIR = get_sdk_sysroot(WASI_SDK, CURRENT_SDK)

if __name__ == '__main__':
    try:
        # download_and_unpack's return value becomes the process exit status.
        sys.exit(download_and_unpack(CURRENT_SDK))
    except KeyboardInterrupt:
        logger.warning("KeyboardInterrupt")
        sys.exit(1)
예제 #10
0
def error(bot, update, error):
    """Log Errors caused by Updates."""
    # Tell the user something went wrong, then record the failing update.
    chat = update.message.chat_id
    bot.send_message(chat_id=chat,
                     text="Произошла какая-то ошибка. Попробуй еще раз.")
    logger.warning('Update "%s" caused error "%s"', update, error)