def main():
    # Initialise logging before anything that may raise, so the except
    # branch below always has a logger to report with.
    init_logger('logs/cloudsweep.log')
    logger = logging.getLogger('clouddump')
    try:
        config = load_config()
        service = Factory().create(config['service']['driver'],
                                   config['service'])
        service.delete_old_files()
        logger.info("Program terminated successfully")
    except SystemExit:
        logger.info("Program terminated with errors")
def main():
    """Dump the database, compress it, and upload it to the chosen service."""
    init_logger('logs/clouddump.log')
    logger = logging.getLogger('clouddump')
    try:
        config = load_config()
        service = Factory().create(config['service']['driver'],
                                   config['service'])
        date_time = strftime("%Y%m%d%H%M%S", gmtime())
        file_name = TMP_DIR + date_time + '_' + config['database']['name']
        database = Factory().create(config['database']['driver'],
                                    config['database'])
        dumped_file = database.dump(config['database']['name'], file_name)
        service.upload(dumped_file)
        os.remove(dumped_file)
        logger.info("%s removed from local system", dumped_file)
        logger.info("Program terminated successfully")
    except SystemExit:
        logger.info("Program terminated with errors")
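# Both entry points above resolve their drivers through Factory().create(),
# which is defined elsewhere in the project. The sketch below is a
# hypothetical minimal version, assuming drivers are registered under the
# name found in each config section; the registry contents are illustrative.
# Raising SystemExit on an unknown driver matches the except SystemExit
# handlers in both main() functions above.
class Factory:
    """Map a driver name from a config section to a class and build it."""

    registry = {}  # populated via Factory.register, e.g. ('s3', S3Service)

    @classmethod
    def register(cls, name, driver_cls):
        cls.registry[name] = driver_cls

    def create(self, driver_name, section):
        try:
            driver_cls = self.registry[driver_name]
        except KeyError:
            raise SystemExit('Unknown driver: {}'.format(driver_name))
        return driver_cls(section)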
async def send_data(loop):
    """Periodically send sensor readings to the controller over raw TCP."""
    time_now = time()
    controller_id = str(uuid4())
    logger = init_logger(
        'sensor',
        '/var/log/sensor/sensor_{}.log'.format(controller_id),
        '/var/log/sensor/sensor_debug_{}.log'.format(controller_id),
        '/var/log/sensor/sensor_error_{}.log'.format(controller_id),
        debug=DEBUG_LOG)
    count = 0
    # An earlier aiohttp.ClientSession-based implementation was dropped in
    # favour of writing raw HTTP requests over a plain socket.
    # Note: the explicit loop argument is deprecated since Python 3.8.
    reader, writer = await asyncio.open_connection(CONTROLLER_HOST,
                                                   CONTROLLER_PORT,
                                                   loop=loop)
    while True:
        data_to_send = {
            'id': controller_id,
            'payload': randint(0, 9),
            'datetime': datetime.now().strftime('%Y%m%dT%H%M')
        }
        message = craft_json_http(data_to_send, CONTROLLER_HOST, API_URL)
        writer.write(message)
        await writer.drain()  # respect the transport's write buffer
        # The controller's response is intentionally ignored.
        count += 1
        await asyncio.sleep(TIME_TO_SLEEP)
        if count > 100:
            logger.debug('Time passed: {} Count: {}'.format(
                time() - time_now, count))
            time_now = time()
            count = 0
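# craft_json_http() is imported from elsewhere in the sensor package and not
# shown here. Since send_data() writes its output straight to a TCP socket,
# the helper presumably frames each reading as a raw HTTP POST. A minimal
# sketch under that assumption (the exact header set is a guess):
import json


def craft_json_http(data, host, path):
    """Build a bare HTTP/1.1 POST request carrying `data` as a JSON body,
    returned as bytes ready for StreamWriter.write()."""
    body = json.dumps(data).encode('utf-8')
    headers = ('POST {path} HTTP/1.1\r\n'
               'Host: {host}\r\n'
               'Content-Type: application/json\r\n'
               'Content-Length: {length}\r\n'
               '\r\n').format(path=path, host=host, length=len(body))
    return headers.encode('ascii') + body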
import os

from tools import init_logger, env_variable_to_int
from manipulator import Manipulator

MANIPULATOR_HOST = os.getenv('MANIPULATOR_HOST')
MANIPULATOR_PORT = env_variable_to_int('MANIPULATOR_PORT')
DEBUG_LOG = bool(env_variable_to_int('DEBUG'))

if __name__ == '__main__':
    logger = init_logger('manipulator',
                         '/var/log/manipulator/info.log',
                         '/var/log/manipulator/debug.log',
                         '/var/log/manipulator/error.log',
                         DEBUG_LOG)
    logger.info('Manipulator app starting with the following params:'
                '\nMANIPULATOR_HOST: {}'
                '\nMANIPULATOR_PORT: {}'
                '\nDEBUG: {}'.format(MANIPULATOR_HOST, MANIPULATOR_PORT,
                                     DEBUG_LOG))
    manipulator = Manipulator(MANIPULATOR_HOST, MANIPULATOR_PORT,
                              logger=logger)
    manipulator.run_server()
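# env_variable_to_int() comes from the shared tools module and is not shown
# in this excerpt. A plausible minimal implementation, assuming unset or
# malformed variables should fall back to a default (hypothetical behaviour):
import os


def env_variable_to_int(name, default=0):
    """Read an environment variable and coerce it to int."""
    try:
        return int(os.environ[name])
    except (KeyError, ValueError):
        return default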
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--arch", default='albert', type=str)
    parser.add_argument('--task_name', default='lcqmc', type=str)
    parser.add_argument("--train_max_seq_len", default=60, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--eval_max_seq_len", default=60, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--eval_batch_size", default=16, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=2e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.1, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    # Note: the original declared this as type=int, which would round the
    # 0.1 default away; the value is a fraction, so it must be a float.
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for, "
                             "e.g., 0.1 = 10%% of training.")
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name "
                             "and ending with the step number.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='',
                        help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='',
                        help="For distant debugging.")
    args = parser.parse_args()

    args.model_save_path = config['checkpoint_dir'] / f'{args.arch}'
    args.model_save_path.mkdir(exist_ok=True)

    # Set up distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        # Initializes the distributed backend which will take care of
        # synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    init_logger(log_file=config['log_dir'] / 'finetuning.log')
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu,
                   bool(args.local_rank != -1), args.fp16)

    # Set seed
    seed_everything(args.seed)

    # --------- data
    processor = BertProcessor(vocab_path=config['bert_dir'] / 'vocab.txt',
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    num_labels = len(label_list)

    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will
        # download model & vocab
        torch.distributed.barrier()

    bert_config = BertConfig.from_json_file(str(config['bert_dir'] / 'bert_config.json'))
    bert_config.share_parameter_across_layers = True
    bert_config.num_labels = num_labels

    logger.info("Training/evaluation parameters %s", args)
    metrics = Accuracy(topK=1)

    # Training
    if args.do_train:
        train_data = processor.get_train(config['data_dir'] / "train.txt")
        train_examples = processor.create_examples(
            lines=train_data,
            example_type='train',
            cached_examples_file=config['data_dir'] / f"cached_train_examples_{args.arch}")
        train_features = processor.create_features(
            examples=train_examples,
            max_seq_len=args.train_max_seq_len,
            cached_features_file=config['data_dir'] / "cached_train_features_{}_{}".format(
                args.train_max_seq_len, args.arch))
        train_dataset = processor.create_dataset(train_features)
        train_sampler = RandomSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        valid_data = processor.get_dev(config['data_dir'] / "dev.txt")
        valid_examples = processor.create_examples(
            lines=valid_data,
            example_type='valid',
            cached_examples_file=config['data_dir'] / f"cached_valid_examples_{args.arch}")
        valid_features = processor.create_features(
            examples=valid_examples,
            max_seq_len=args.eval_max_seq_len,
            cached_features_file=config['data_dir'] / "cached_valid_features_{}_{}".format(
                args.eval_max_seq_len, args.arch))
        valid_dataset = processor.create_dataset(valid_features)
        valid_sampler = SequentialSampler(valid_dataset)
        valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler,
                                      batch_size=args.eval_batch_size)

        model = BertForSequenceClassification.from_pretrained(config['bert_dir'],
                                                              config=bert_config)
        if args.local_rank == 0:
            # Make sure only the first process in distributed training will
            # download model & vocab
            torch.distributed.barrier()
        model.to(args.device)
        train(args, train_dataloader, valid_dataloader, metrics, model)

    if args.do_test:
        test_data = processor.get_train(config['data_dir'] / "test.txt")
        test_examples = processor.create_examples(
            lines=test_data,
            example_type='test',
            cached_examples_file=config['data_dir'] / f"cached_test_examples_{args.arch}")
        test_features = processor.create_features(
            examples=test_examples,
            max_seq_len=args.eval_max_seq_len,
            cached_features_file=config['data_dir'] / "cached_test_features_{}_{}".format(
                args.eval_max_seq_len, args.arch))
        test_dataset = processor.create_dataset(test_features)
        test_sampler = SequentialSampler(test_dataset)
        test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
                                     batch_size=args.eval_batch_size)
        model = BertForSequenceClassification.from_pretrained(args.model_save_path,
                                                              config=bert_config)
        model.to(args.device)
        test_log = evaluate(args, model, test_dataloader, metrics)
        print(test_log)
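# seed_everything() is imported from tools throughout these training scripts
# but its body is not included here. It presumably follows the usual
# reproducibility recipe; a sketch along those lines (the exact set of
# seeded RNGs is an assumption):
import os
import random

import numpy as np
import torch


def seed_everything(seed=42):
    """Seed every RNG a training run typically touches."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)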
import os
import json
import collections
import random
from argparse import ArgumentParser

from configs.base import config
from tools import logger, init_logger, seed_everything
from model.tokenization_bert import BertTokenizer
from progressbar import ProgressBar

MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
                                          ["index", "label"])

init_logger(log_file=config['log_dir'] / "pregenerate_training_data.log")


def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
    """Truncates a pair of sequences to a maximum sequence length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_num_tokens:
            break
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1
        # We want to sometimes truncate from the front and sometimes from the
        # back to add more randomness and avoid biases.
        if random.random() < 0.5:
            del trunc_tokens[0]
        else:
            trunc_tokens.pop()
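# A quick illustration of truncate_seq_pair(): tokens are removed in place,
# one at a time, from whichever sequence is currently longer and from a
# randomly chosen end, until the pair fits the budget.
tokens_a = list('abcdefgh')
tokens_b = list('xyz')
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens=6)
assert len(tokens_a) + len(tokens_b) <= 6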
from collections import namedtuple

import numpy as np
from torch.nn import CrossEntropyLoss, MSELoss
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler

from configs.base import config
from tools import logger, init_logger, AverageMeter, seed_everything
from metrics import LMAccuracy
from model.modeling_bert import BertForPreTraining, BertConfig
from model.file_utils import CONFIG_NAME
from model.tokenization_bert import BertTokenizer
from model.optimization import AdamW, WarmupLinearSchedule

InputFeatures = namedtuple(
    "InputFeatures",
    "input_ids input_mask segment_ids lm_label_ids is_next")

init_logger(log_file=config['log_dir'] / "train_bert_model.log")


def convert_example_to_features(example, tokenizer, max_seq_length):
    tokens = example["tokens"]
    segment_ids = example["segment_ids"]
    is_random_next = example["is_random_next"]
    masked_lm_positions = example["masked_lm_positions"]
    masked_lm_labels = example["masked_lm_labels"]

    # The preprocessed data should already be truncated.
    assert len(tokens) == len(segment_ids) <= max_seq_length

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)

    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    input_array = np.zeros(max_seq_length, dtype=int)
from aiohttp import web

from routes import setup_routes
from tools import init_logger, env_variable_to_int
from tasks import start_background_tasks, cleanup_background_tasks

DEBUG_LOG = bool(env_variable_to_int('DEBUG'))

logger = init_logger('controller',
                     '/var/log/controller.log',
                     '/var/log/controller_debug.log',
                     '/var/log/controller_error.log',
                     debug=DEBUG_LOG)
logger.info(
    'STARTING CONTROLLER: DEBUG IS {}'.format('ON' if DEBUG_LOG else 'OFF'))

app = web.Application()
setup_routes(app)

# Background tasks that communicate with the manipulator.
app.on_startup.append(start_background_tasks)
app.on_cleanup.append(cleanup_background_tasks)

web.run_app(app)
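# start_background_tasks() and cleanup_background_tasks() live in tasks.py
# and are not shown in this excerpt. aiohttp's documented pattern for such
# on_startup/on_cleanup hooks looks roughly like the sketch below; the
# listen_to_manipulator coroutine and its 1-second poll interval are
# hypothetical placeholders.
import asyncio
import contextlib


async def listen_to_manipulator(app):
    """Hypothetical long-running task that talks to the manipulator."""
    while True:
        await asyncio.sleep(1)  # exchange data with the manipulator here


async def start_background_tasks(app):
    app['manipulator_listener'] = asyncio.ensure_future(
        listen_to_manipulator(app))


async def cleanup_background_tasks(app):
    app['manipulator_listener'].cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await app['manipulator_listener']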