Example no. 1
def main():
	config = Config()

	# save policy to output_dir
	if os.path.exists(config.training.output_dir) and config.training.overwrite:  # overwrite the existing output directory if requested
		shutil.rmtree(config.training.output_dir)  # delete an entire directory tree

	if not os.path.exists(config.training.output_dir):
		os.makedirs(config.training.output_dir)

	shutil.copytree('crowd_nav/configs', os.path.join(config.training.output_dir, 'configs'))


	# configure logging
	log_file = os.path.join(config.training.output_dir, 'output.log')
	mode = 'a' if config.training.resume else 'w'
	file_handler = logging.FileHandler(log_file, mode=mode)
	stdout_handler = logging.StreamHandler(sys.stdout)
	level = logging.INFO
	logging.basicConfig(level=level, handlers=[stdout_handler, file_handler],
						format='%(asctime)s, %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")


	torch.manual_seed(config.env.seed)
	torch.cuda.manual_seed_all(config.env.seed)
	if config.training.cuda and torch.cuda.is_available():
		if config.training.cuda_deterministic:
			# reproducible but slower
			torch.backends.cudnn.benchmark = False
			torch.backends.cudnn.deterministic = True
		else:
			# not reproducible but faster
			torch.backends.cudnn.benchmark = True
			torch.backends.cudnn.deterministic = False



	torch.set_num_threads(config.training.num_threads)
	device = torch.device("cuda" if config.training.cuda and torch.cuda.is_available() else "cpu")


	logging.info('Create other envs with new settings')


	# For fastest training: use GRU
	env_name = config.env.env_name
	recurrent_cell = 'GRU'

	if config.sim.render:
		fig, ax = plt.subplots(figsize=(7, 7))
		ax.set_xlim(-6, 6)
		ax.set_ylim(-6, 6)
		ax.set_xlabel('x(m)', fontsize=16)
		ax.set_ylabel('y(m)', fontsize=16)
		plt.ion()
		plt.show()
	else:
		ax = None


	if config.sim.render:
		config.training.num_processes = 1
		config.ppo.num_mini_batch = 1

	# create a manager env
	envs = make_vec_envs(env_name, config.env.seed, config.training.num_processes,
						 config.reward.gamma, None, device, False, config=config, ax=ax)



	actor_critic = Policy(
		envs.observation_space.spaces, # pass the Dict into policy to parse
		envs.action_space,
		base_kwargs=config,
		base=config.robot.policy)

	rollouts = RolloutStorage(config.ppo.num_steps,
							  config.training.num_processes,
							  envs.observation_space.spaces,
							  envs.action_space,
							  config.SRNN.human_node_rnn_size,
							  config.SRNN.human_human_edge_rnn_size,
							  recurrent_cell_type=recurrent_cell)

	if config.training.resume:  # load a saved checkpoint when resuming
		load_path = config.training.load_path
		actor_critic.load_state_dict(torch.load(load_path))
		print("Loaded the following checkpoint:", load_path)


	# move the policy to the target device; note that the DataParallel wrapper is
	# discarded here, so multi-GPU data parallelism is not actually used
	nn.DataParallel(actor_critic).to(device)


	agent = algo.PPO(
		actor_critic,
		config.ppo.clip_param,
		config.ppo.epoch,
		config.ppo.num_mini_batch,
		config.ppo.value_loss_coef,
		config.ppo.entropy_coef,
		lr=config.training.lr,
		eps=config.training.eps,
		max_grad_norm=config.training.max_grad_norm)



	obs = envs.reset()
	if isinstance(obs, dict):
		for key in obs:
			rollouts.obs[key][0].copy_(obs[key])
	else:
		rollouts.obs[0].copy_(obs)

	rollouts.to(device)

	episode_rewards = deque(maxlen=100)

	start = time.time()
	num_updates = int(
		config.training.num_env_steps) // config.ppo.num_steps // config.training.num_processes

	for j in range(num_updates):

		if config.training.use_linear_lr_decay:
			utils.update_linear_schedule(
				agent.optimizer, j, num_updates, config.training.lr)

		for step in range(config.ppo.num_steps):
			# Sample actions
			with torch.no_grad():

				rollouts_obs = {}
				for key in rollouts.obs:
					rollouts_obs[key] = rollouts.obs[key][step]
				rollouts_hidden_s = {}
				for key in rollouts.recurrent_hidden_states:
					rollouts_hidden_s[key] = rollouts.recurrent_hidden_states[key][step]
				value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
					rollouts_obs, rollouts_hidden_s,
					rollouts.masks[step])

			if config.sim.render:
				envs.render()
			# Observe reward and next obs
			obs, reward, done, infos = envs.step(action)
			# print(done)

			for info in infos:
				# print(info.keys())
				if 'episode' in info.keys():
					episode_rewards.append(info['episode']['r'])

			# If done then clean the history of observations.
			masks = torch.FloatTensor(
				[[0.0] if done_ else [1.0] for done_ in done])
			bad_masks = torch.FloatTensor(
				[[0.0] if 'bad_transition' in info.keys() else [1.0]
				 for info in infos])
			rollouts.insert(obs, recurrent_hidden_states, action,
							action_log_prob, value, reward, masks, bad_masks)

		with torch.no_grad():
			rollouts_obs = {}
			for key in rollouts.obs:
				rollouts_obs[key] = rollouts.obs[key][-1]
			rollouts_hidden_s = {}
			for key in rollouts.recurrent_hidden_states:
				rollouts_hidden_s[key] = rollouts.recurrent_hidden_states[key][-1]
			next_value = actor_critic.get_value(
				rollouts_obs, rollouts_hidden_s,
				rollouts.masks[-1]).detach()




		rollouts.compute_returns(next_value, config.ppo.use_gae, config.reward.gamma,
								 config.ppo.gae_lambda, config.training.use_proper_time_limits)

		value_loss, action_loss, dist_entropy = agent.update(rollouts)

		rollouts.after_update()

		# save the model every save_interval updates and at the last update
		if (j % config.training.save_interval == 0
			or j == num_updates - 1) :
			save_path = os.path.join(config.training.output_dir, 'checkpoints')
			if not os.path.exists(save_path):
				os.mkdir(save_path)

			# if you normalized the observation, you may also want to save rms
			# torch.save([
			# 	actor_critic,
			# 	getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
			# ], os.path.join(save_path, '%.5i'%j + ".pt"))

			torch.save(actor_critic.state_dict(), os.path.join(save_path, '%.5i' % j + ".pt"))

		if j % config.training.log_interval == 0 and len(episode_rewards) > 1:
			total_num_steps = (j + 1) * config.training.num_processes * config.ppo.num_steps
			end = time.time()
			print(
				"Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward "
				"{:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, "
				"dist entropy {:.4f}, value loss {:.4f}, action loss {:.4f}\n"
					.format(j, total_num_steps,
							int(total_num_steps / (end - start)),
							len(episode_rewards), np.mean(episode_rewards),
							np.median(episode_rewards), np.min(episode_rewards),
							np.max(episode_rewards), dist_entropy, value_loss,
							action_loss))

			df = pd.DataFrame({'misc/nupdates': [j], 'misc/total_timesteps': [total_num_steps],
							   'fps': int(total_num_steps / (end - start)), 'eprewmean': [np.mean(episode_rewards)],
							   'loss/policy_entropy': dist_entropy, 'loss/policy_loss': action_loss,
							   'loss/value_loss': value_loss})

			if os.path.exists(os.path.join(config.training.output_dir, 'progress.csv')) and j > 20:
				df.to_csv(os.path.join(config.training.output_dir, 'progress.csv'), mode='a', header=False, index=False)
			else:
				df.to_csv(os.path.join(config.training.output_dir, 'progress.csv'), mode='w', header=True, index=False)
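
rollouts.compute_returns above applies generalized advantage estimation (GAE) when config.ppo.use_gae is set. For reference, here is a minimal standalone sketch of the recursion it performs, written against flat NumPy arrays rather than the RolloutStorage tensor layout; the function and argument names are illustrative, not taken from the repository.

import numpy as np

def gae_returns(rewards, values, masks, next_value, gamma, gae_lambda):
    # rewards[t], masks[t]: reward and not-done mask after step t; values[t]: V(s_t)
    T = len(rewards)
    values = np.append(values, next_value)  # append the bootstrap value V(s_T)
    returns = np.zeros(T)
    gae = 0.0
    for t in reversed(range(T)):
        # TD residual: delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
        delta = rewards[t] + gamma * values[t + 1] * masks[t] - values[t]
        # GAE accumulator, reset across episode boundaries by the mask
        gae = delta + gamma * gae_lambda * masks[t] * gae
        returns[t] = gae + values[t]
    return returns
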
Example no. 2
                self.train_phase: False,
                self.dropout_keep: 1.0
            }
            # print(X_item)
            scores = self.sess.run((self.positive), feed_dict=feed_dict)
            scores = scores.reshape(len(X_user))
            scorelist = np.append(scorelist, scores)
            # scorelist.append(scores)
            j = j + self.batch_size
        return scorelist


if __name__ == '__main__':
    logger = logging.getLogger('mylogger')
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler('ONCF.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # fh.setFormatter(formatter)
    # ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    # Data loading
    args = parse_args()
    data = DATA.LoadData(args.path, args.dataset)
    if args.verbose > 0:
        print(
            "FM: dataset=%s, factors=%d,  #epoch=%d, batch=%d, lr=%.4f, lambda=%.1e,optimizer=%s, batch_norm=%d, keep=%.2f"
            % (args.dataset, args.hidden_factor, args.epoch, args.batch_size,
Example no. 3
def train_net(network,
              mode,
              train_path,
              num_classes,
              batch_size,
              data_shape,
              mean_pixels,
              resume,
              finetune,
              pretrained,
              epoch,
              prefix,
              ctx,
              begin_epoch,
              end_epoch,
              frequent,
              learning_rate,
              momentum,
              weight_decay,
              lr_refactor_step,
              lr_refactor_ratio,
              alpha_bb8=1.0,
              freeze_layer_pattern='',
              num_example=5717,
              label_pad_width=350,
              nms_thresh=0.45,
              force_nms=False,
              ovp_thresh=0.5,
              use_difficult=False,
              class_names=None,
              voc07_metric=False,
              nms_topk=400,
              force_suppress=False,
              train_list="",
              val_path="",
              val_list="",
              iter_monitor=0,
              monitor_pattern=".*",
              log_file=None,
              optimizer='sgd',
              tensorboard=False,
              checkpoint_period=2,
              min_neg_samples=0):
    """
    Wrapper for training phase.

    Parameters:
    ----------
    network : str
        symbol name for the network structure
    train_path : str
        record file path for training
    num_classes : int
        number of object classes, not including background
    batch_size : int
        training batch-size
    data_shape : int or tuple
        width/height as integer or (3, height, width) tuple
    mean_pixels : tuple of floats
        mean pixel values for red, green and blue
    resume : int
        resume from previous checkpoint if > 0
    finetune : int
        fine-tune from previous checkpoint if > 0
    pretrained : str
        prefix of pretrained model, including path
    epoch : int
        load epoch of either resume/finetune/pretrained model
    prefix : str
        prefix for saving checkpoints
    ctx : [mx.cpu()] or [mx.gpu(x)]
        list of mxnet contexts
    begin_epoch : int
        starting epoch for training, should be 0 if not otherwise specified
    end_epoch : int
        end epoch of training
    frequent : int
        frequency to print out training status
    optimizer : str
        choice of optimizer, other than the default sgd
    learning_rate : float
        training learning rate
    momentum : float
        training momentum
    weight_decay : float
        training weight decay param
    lr_refactor_ratio : float
        multiplier for reducing learning rate
    lr_refactor_step : comma separated integers
        at which epoch to rescale learning rate, e.g. '30, 60, 90'
    freeze_layer_pattern : str
        regex pattern for layers that need to be fixed
    num_example : int
        number of training images
    label_pad_width : int
        force padding training and validation labels to sync their label widths
    nms_thresh : float
        non-maximum suppression threshold for validation
    force_nms : boolean
        suppress overlapping objects from different classes
    train_list : str
        list file path for training; this will replace the labels embedded in the record file
    val_path : str
        record file path for validation
    val_list : str
        list file path for validation; this will replace the labels embedded in the record file
    iter_monitor : int
        monitor internal stats in networks if > 0, specified by monitor_pattern
    monitor_pattern : str
        regex pattern for monitoring network stats
    log_file : str
        log to file if enabled
    tensorboard : bool
        record logs into tensorboard
    min_neg_samples : int
        always keep some negative examples, no matter how many positives there are.
        This is useful when training on images with no ground-truth.
    checkpoint_period : int
        a checkpoint will be saved every "checkpoint_period" epochs
    """
    # check actual number of train_images
    if os.path.exists(train_path.replace('rec', 'idx')):
        with open(train_path.replace('rec', 'idx'), 'r') as f:
            txt = f.readlines()
        num_example = len(txt)
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if log_file:
        log_file_path = os.path.join(os.path.dirname(prefix), log_file)
        if not os.path.exists(os.path.dirname(log_file_path)):
            os.makedirs(os.path.dirname(log_file_path))
        fh = logging.FileHandler(log_file_path)
        logger.addHandler(fh)

    # check args
    if isinstance(data_shape, int):
        data_shape = (3, data_shape, data_shape)
    assert len(data_shape) == 3 and data_shape[0] == 3
    if prefix.endswith('_'):
        prefix += '_' + str(data_shape[1])

    if isinstance(mean_pixels, (int, float)):
        mean_pixels = [mean_pixels, mean_pixels, mean_pixels]
    assert len(mean_pixels) == 3, "must provide all RGB mean values"

    train_iter = DetRecordIter(train_path,
                               batch_size,
                               data_shape,
                               mean_pixels=mean_pixels,
                               label_pad_width=label_pad_width,
                               path_imglist=train_list,
                               **cfg.train)
    label = train_iter._batch.label[0].asnumpy()
    if val_path:
        val_iter = DetRecordIter(val_path,
                                 batch_size,
                                 data_shape,
                                 mean_pixels=mean_pixels,
                                 label_pad_width=label_pad_width,
                                 path_imglist=val_list,
                                 **cfg.valid)
        val_label = val_iter._batch.label[0].asnumpy()
    else:
        val_iter = None

    # load symbol
    net = get_symbol_train(network,
                           mode,
                           data_shape[1],
                           alpha_bb8,
                           num_classes=num_classes,
                           nms_thresh=nms_thresh,
                           force_suppress=force_suppress,
                           nms_topk=nms_topk,
                           minimum_negative_samples=min_neg_samples)

    # define layers with fixed weight/bias
    # if freeze_layer_pattern.strip():
    #     re_prog = re.compile(freeze_layer_pattern)
    #     fixed_param_names = [name for name in net.list_arguments() if re_prog.match(name)]
    # else:
    #     fixed_param_names = None
    fixed_param_names = get_fixed_params(net, freeze_layer_pattern)

    # load pretrained or resume from previous state
    ctx_str = '(' + ','.join([str(c) for c in ctx]) + ')'
    if resume > 0:
        logger.info("Resume training with {} from epoch {}".format(
            ctx_str, resume))
        _, args, auxs = mx.model.load_checkpoint(prefix, resume)
        begin_epoch = resume
    elif finetune > 0:
        logger.info("Start finetuning with {} from epoch {}".format(
            ctx_str, finetune))
        _, args, auxs = mx.model.load_checkpoint(prefix, finetune)
        begin_epoch = finetune
        # check what layers mismatch with the loaded parameters
        exe = net.simple_bind(mx.cpu(),
                              data=(1, 3, 300, 300),
                              label=(1, 1, 5),
                              grad_req='null')
        arg_dict = exe.arg_dict
        fixed_param_names = []
        for k, v in arg_dict.items():
            if k in args:
                if v.shape != args[k].shape:
                    del args[k]
                    logging.info("Removed %s" % k)
                else:
                    if not 'pred' in k:
                        fixed_param_names.append(k)
    elif pretrained:
        logger.info("Start training with {} from pretrained model {}".format(
            ctx_str, pretrained))
        _, args, auxs = mx.model.load_checkpoint(pretrained, epoch)
        args = convert_pretrained(pretrained, args)
    else:
        logger.info("Experimental: start training from scratch with {}".format(
            ctx_str))
        args = None
        auxs = None
        fixed_param_names = None

    # helper information
    if fixed_param_names:
        logger.info("Freezed parameters: [" + ','.join(fixed_param_names) +
                    ']')

    # visualize net - both train and test
    # net_visualization(net=net, network=network,data_shape=data_shape[2],
    #                   output_dir=os.path.dirname(prefix), train=True)
    # net_visualization(net=None, network=network, data_shape=data_shape[2],
    #                   output_dir=os.path.dirname(prefix), train=False, num_classes=num_classes)

    # init training module
    data_names = [k[0] for k in train_iter.provide_data]
    label_names = [k[0] for k in train_iter.provide_label]
    mod = mx.mod.Module(net,
                        data_names=data_names,
                        label_names=label_names,
                        logger=logger,
                        context=ctx,
                        fixed_param_names=fixed_param_names)

    batch_end_callback = []
    eval_end_callback = []
    epoch_end_callback = [
        mx.callback.do_checkpoint(prefix, period=checkpoint_period)
    ]

    # add logging to tensorboard
    if tensorboard:
        tensorboard_dir = os.path.join(os.path.dirname(prefix), 'logs')
        if not os.path.exists(tensorboard_dir):
            os.makedirs(os.path.join(tensorboard_dir, 'train', 'scalar'))
            os.makedirs(os.path.join(tensorboard_dir, 'train', 'dist'))
            os.makedirs(os.path.join(tensorboard_dir, 'val', 'roc'))
            os.makedirs(os.path.join(tensorboard_dir, 'val', 'scalar'))
            os.makedirs(os.path.join(tensorboard_dir, 'val', 'images'))
        batch_end_callback.append(
            ParseLogCallback(
                dist_logging_dir=os.path.join(tensorboard_dir, 'train',
                                              'dist'),
                scalar_logging_dir=os.path.join(tensorboard_dir, 'train',
                                                'scalar'),
                logfile_path=log_file_path,
                batch_size=batch_size,
                iter_monitor=iter_monitor,
                frequent=frequent))
        eval_end_callback.append(
            LogMetricsCallback(os.path.join(tensorboard_dir, 'val/scalar'),
                               'ssd',
                               global_step=0))
        # eval_end_callback.append(LogROCCallback(logging_dir=os.path.join(tensorboard_dir, 'val/roc'),
        #                                         roc_path=os.path.join(os.path.dirname(prefix), 'roc'),
        #                                         class_names=class_names))
        # eval_end_callback.append(LogDetectionsCallback(logging_dir=os.path.join(tensorboard_dir, 'val/images'),
        #                                                images_path=os.path.join(os.path.dirname(prefix), 'images'),
        #                                                class_names=class_names,batch_size=batch_size,mean_pixels=mean_pixels))

    # this callback should be the last in the series of batch_end_callbacks,
    # since it resets the metric evaluation every `frequent` batches
    batch_end_callback.append(
        mx.callback.Speedometer(train_iter.batch_size, frequent=frequent))

    learning_rate, lr_scheduler = get_lr_scheduler(learning_rate,
                                                   lr_refactor_step,
                                                   lr_refactor_ratio,
                                                   num_example, batch_size,
                                                   begin_epoch)
    logger.info(
        "learning rate: {}, lr refactor step: {}, lr refactor ratio: {}, batch size: {}."
        .format(learning_rate, lr_refactor_step, lr_refactor_ratio,
                batch_size))
    # add possibility for different optimizer
    opt, opt_params = get_optimizer_params(optimizer=optimizer,
                                           learning_rate=learning_rate,
                                           momentum=momentum,
                                           weight_decay=weight_decay,
                                           lr_scheduler=lr_scheduler,
                                           ctx=ctx,
                                           logger=logger)
    logger.info("Optimizer: {}".format(opt))
    for k, v in opt_params.items():
        if k == 'lr_scheduler':
            continue
        logger.info("{}: {}".format(k, v))

    # TODO monitor the gradient flow as in 'https://github.com/dmlc/tensorboard/blob/master/docs/tutorial/understanding-vanish-gradient.ipynb'
    monitor = mx.mon.Monitor(
        iter_monitor, pattern=monitor_pattern) if iter_monitor > 0 else None

    # run fit; every n epochs the evaluation network is run to compute mAP
    if voc07_metric:
        valid_metric = VOC07MApMetric(ovp_thresh,
                                      use_difficult,
                                      class_names,
                                      pred_idx=4,
                                      roc_output_path=os.path.join(
                                          os.path.dirname(prefix), 'roc'))
    else:
        valid_metric = MApMetric(ovp_thresh,
                                 use_difficult,
                                 class_names,
                                 pred_idx=4,
                                 roc_output_path=os.path.join(
                                     os.path.dirname(prefix), 'roc'))

    eval_metric = getattr(train_metric, 'MultiBoxMetric_{}'.format(mode))
    mod.fit(
        train_iter,
        val_iter,
        eval_metric=eval_metric(),
        validation_metric=[valid_metric, eval_metric()
                           ],  # use 'valid_metric' to calculate mAP
        batch_end_callback=batch_end_callback,
        eval_end_callback=eval_end_callback,
        epoch_end_callback=epoch_end_callback,
        optimizer=opt,
        optimizer_params=opt_params,
        begin_epoch=begin_epoch,
        num_epoch=end_epoch,
        initializer=mx.init.Xavier(),
        arg_params=args,
        aux_params=auxs,
        allow_missing=True,
        monitor=monitor)
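
train_net's docstring describes lr_refactor_step as comma-separated epoch numbers and lr_refactor_ratio as the decay multiplier. As a rough sketch of how such epoch boundaries can be turned into an MXNet learning-rate scheduler (this is an assumption about what a helper like get_lr_scheduler does, not the project's actual implementation):

import math
import mxnet as mx

def lr_scheduler_sketch(learning_rate, lr_refactor_step, lr_refactor_ratio,
                        num_example, batch_size, begin_epoch=0):
    # no decay requested if the ratio is >= 1
    if lr_refactor_ratio >= 1.0:
        return learning_rate, None
    iters_per_epoch = int(math.ceil(num_example / float(batch_size)))
    epochs = sorted(int(s) for s in str(lr_refactor_step).split(',') if s.strip())
    # when resuming, apply the decay steps that have already passed to the base lr
    for e in epochs:
        if e <= begin_epoch:
            learning_rate *= lr_refactor_ratio
    steps = [(e - begin_epoch) * iters_per_epoch for e in epochs if e > begin_epoch]
    if not steps:
        return learning_rate, None
    return learning_rate, mx.lr_scheduler.MultiFactorScheduler(step=steps,
                                                               factor=lr_refactor_ratio)
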
from liota.core.package_manager import LiotaPackage
import logging
import Queue

log = logging.getLogger(__name__)
hdlr = logging.FileHandler('/var/log/liota/liota.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
log.addHandler(hdlr) 

dependencies = ["graphite", "sklearn_edge_component", "examples/windmill_simulator"]

action_taken = Queue.Queue()

class PackageClass(LiotaPackage):
	def create_udm(self, windmill_model):
		
		def get_rpm():
			return windmill_model.get_rpm()

		def get_vib():
			return windmill_model.get_vib()

		def get_action(value):
			log.info("Action: {}".format(value))
			action_taken.put(value)

		def get_action_taken():
			return action_taken.get(block=True)

		self.get_rpm = get_rpm
Example no. 5
'''
with open('koreanbotsToken.bin', 'rb') as f:
    koreanbotsToken = load(f)

Bots = koreanbots.Client(bot, koreanbotsToken)'''

if 'thinkingbot' in listdir():
    chdir('thinkingbot')

logger = logging.getLogger('thinkingbot_Log')
logger.setLevel(logging.INFO)

stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)

file_handler = logging.FileHandler(filename="run.log", mode='a')
file_handler.setLevel(logging.ERROR)
logger.addHandler(file_handler)


async def presence():
    await bot.wait_until_ready()
    while not bot.is_closed():
        messages = [
            f'{len(bot.guilds)}서버 {len(bot.users)}유저', f'{mainprefix}도움',
            f'ThinkingBot {version}', 'DM으로 문의하세요'
        ]
        for i in messages:
            await bot.change_presence(status=discord.Status.online,
                                      activity=discord.Game(i))
            await sleep(10)
"""Build full release.
Loose files are packed into a bsa and version numbers are added."""
import config
import logging
import release
import shutil

logger = logging.getLogger(release.__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = logging.FileHandler("{}.log".format(release.__name__), "w")
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
    flags = release.ArchiveFlags()
    flags.check_misc = True
    flags.check_retain_directory_names = True
    flags.check_retain_file_names = True
    release.build_release(dir_src=config.DIR_REPO_LE,
                          temp_alt=config.DIR_TEMP_ALT,
                          arch_exe=config.ARCH_EXE_LE,
                          arch_flags=flags,
                          trim_fomod=True,
                          warn_readmes=False)
    shutil.copytree(config.DIR_SCRIPTS_LE, config.DIR_SCRIPTS_SE)
    release.build_release(dir_src=config.DIR_REPO_SE,
                          temp_alt=config.DIR_TEMP_ALT,
                          arch_exe=config.ARCH_EXE_SE,
                          arch_flags=flags,
                          trim_fomod=True,
                          warn_readmes=False)
Example no. 7
def main():
    args, args_text = _parse_args()

    # detectron2 data loader ###########################
    # det2_args = default_argument_parser().parse_args()
    det2_args = args
    det2_args.config_file = args.det2_cfg
    cfg = setup(det2_args)
    mapper = PanopticDeeplabDatasetMapper(
        cfg, augmentations=build_sem_seg_train_aug(cfg))
    det2_dataset = iter(build_detection_train_loader(cfg, mapper=mapper))

    # dist init
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    torch.cuda.set_device(args.local_rank)
    args.world_size = torch.distributed.get_world_size()
    args.local_rank = torch.distributed.get_rank()

    args.save = args.save + args.exp_name

    if args.local_rank == 0:
        create_exp_dir(args.save,
                       scripts_to_save=glob.glob('*.py') + glob.glob('*.sh'))
        logger = SummaryWriter(args.save)
        log_format = '%(asctime)s %(message)s'
        logging.basicConfig(stream=sys.stdout,
                            level=logging.INFO,
                            format=log_format,
                            datefmt='%m/%d %I:%M:%S %p')
        fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)
        logging.info("args = %s", str(args))
    else:
        logger = None

    # preparation ################
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # config network and criterion ################
    gt_down_sampling = 1
    min_kept = int(args.batch_size * args.image_height * args.image_width //
                   (16 * gt_down_sampling**2))
    ohem_criterion = ProbOhemCrossEntropy2d(ignore_label=255,
                                            thresh=0.7,
                                            min_kept=min_kept,
                                            use_weight=False)

    # data loader ###########################

    kwargs = {
        'num_workers': args.workers,
        'pin_memory': True,
        'drop_last': True
    }
    train_loader, train_sampler, val_loader, val_sampler, num_classes = dataloaders.make_data_loader(
        args, **kwargs)

    with open(args.json_file, 'r') as f:
        # dict_a = json.loads(f, cls=NpEncoder)
        model_dict = json.loads(f.read())

    width_mult_list = [
        4. / 12,
        6. / 12,
        8. / 12,
        10. / 12,
        1.,
    ]
    model = Network(Fch=args.Fch,
                    num_classes=num_classes,
                    stem_head_width=(args.stem_head_width,
                                     args.stem_head_width))

    last = model_dict["lasts"]

    if args.local_rank == 0:
        logging.info("net: " + str(model))
        with torch.cuda.device(0):
            macs, params = get_model_complexity_info(model, (3, 1024, 2048),
                                                     as_strings=True,
                                                     print_per_layer_stat=True,
                                                     verbose=True)
            logging.info('{:<30}  {:<8}'.format('Computational complexity: ',
                                                macs))
            logging.info('{:<30}  {:<8}'.format('Number of parameters: ',
                                                params))

        with open(os.path.join(args.save, 'args.yaml'), 'w') as f:
            f.write(args_text)

    init_weight(model,
                nn.init.kaiming_normal_,
                torch.nn.BatchNorm2d,
                args.bn_eps,
                args.bn_momentum,
                mode='fan_in',
                nonlinearity='relu')

    if args.pretrain:
        model.backbone = load_pretrain(model.backbone, args.pretrain)
    model = model.cuda()

    # if args.sync_bn:
    #     if has_apex:
    #         model = apex.parallel.convert_syncbn_model(model)
    #     else:
    #         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    # Optimizer ###################################
    base_lr = args.base_lr

    if args.opt == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=base_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.opt == "adam":
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=base_lr,
                                     betas=(0.9, 0.999),
                                     eps=1e-08)
    elif args.opt == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(),
                                      lr=base_lr,
                                      betas=(0.9, 0.999),
                                      eps=1e-08,
                                      weight_decay=args.weight_decay)
    else:
        optimizer = create_optimizer(args, model)

    if args.sched == "raw":
        lr_scheduler = None
    else:
        max_iteration = len(train_loader) * args.epochs
        lr_scheduler = Iter_LR_Scheduler(args, max_iteration,
                                         len(train_loader))

    start_epoch = 0
    if os.path.exists(os.path.join(args.save, 'last.pth.tar')):
        args.resume = os.path.join(args.save, 'last.pth.tar')

    if args.resume:
        model_state_file = args.resume
        if os.path.isfile(model_state_file):
            checkpoint = torch.load(model_state_file,
                                    map_location=torch.device('cpu'))
            start_epoch = checkpoint['start_epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logging.info('Loaded checkpoint (starting from iter {})'.format(
                checkpoint['start_epoch']))

    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(model,
                             decay=args.model_ema_decay,
                             device='cpu' if args.model_ema_force_cpu else '',
                             resume=None)

    if model_ema:
        eval_model = model_ema.ema
    else:
        eval_model = model

    if has_apex:
        model = DDP(model, delay_allreduce=True)
    else:
        model = DDP(model, device_ids=[args.local_rank])

    best_valid_iou = 0.
    best_epoch = 0

    logging.info("rank: {} world_size: {}".format(args.local_rank,
                                                  args.world_size))
    for epoch in range(start_epoch, args.epochs):
        train_sampler.set_epoch(epoch)
        val_sampler.set_epoch(epoch)
        if args.local_rank == 0:
            logging.info(args.load_path)
            logging.info(args.save)
            logging.info("lr: " + str(optimizer.param_groups[0]['lr']))

        # training
        drop_prob = args.drop_path_prob * epoch / args.epochs
        # model.module.drop_path_prob(drop_prob)

        train_mIoU = train(train_loader, det2_dataset, model, model_ema,
                           ohem_criterion, num_classes, lr_scheduler,
                           optimizer, logger, epoch, args, cfg)

        torch.cuda.empty_cache()

        if epoch > args.epochs // 3:
            # if epoch >= 10:
            temp_iou, avg_loss = validation(val_loader,
                                            eval_model,
                                            ohem_criterion,
                                            num_classes,
                                            args,
                                            cal_miou=True)
        else:
            temp_iou = 0.
            avg_loss = -1

        torch.cuda.empty_cache()
        if args.local_rank == 0:
            logging.info("Epoch: {} train miou: {:.2f}".format(
                epoch + 1, 100 * train_mIoU))
            if temp_iou > best_valid_iou:
                best_valid_iou = temp_iou
                best_epoch = epoch

                if model_ema is not None:
                    torch.save(
                        {
                            'start_epoch': epoch + 1,
                            'state_dict': model_ema.ema.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            # 'lr_scheduler': lr_scheduler.state_dict(),
                        },
                        os.path.join(args.save, 'best_checkpoint.pth.tar'))
                else:
                    torch.save(
                        {
                            'start_epoch': epoch + 1,
                            'state_dict': model.module.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            # 'lr_scheduler': lr_scheduler.state_dict(),
                        },
                        os.path.join(args.save, 'best_checkpoint.pth.tar'))

            logger.add_scalar("mIoU/val", temp_iou, epoch)
            logging.info("[Epoch %d/%d] valid mIoU %.4f eval loss %.4f" %
                         (epoch + 1, args.epochs, temp_iou, avg_loss))
            logging.info("Best valid mIoU %.4f Epoch %d" %
                         (best_valid_iou, best_epoch))

            if model_ema is not None:
                torch.save(
                    {
                        'start_epoch': epoch + 1,
                        'state_dict': model_ema.ema.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        # 'lr_scheduler': lr_scheduler.state_dict(),
                    },
                    os.path.join(args.save, 'last.pth.tar'))
            else:
                torch.save(
                    {
                        'start_epoch': epoch + 1,
                        'state_dict': model.module.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        # 'lr_scheduler': lr_scheduler.state_dict(),
                    },
                    os.path.join(args.save, 'last.pth.tar'))
Example no. 8
import logging
import couchdb

fh = logging.FileHandler('bliss.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logging.getLogger('').addHandler(fh)
logging.getLogger('').addHandler(ch)
logging.getLogger('').setLevel(logging.DEBUG)
logging.getLogger('apiclient').setLevel(logging.WARN)

DEBUG = True
DB = "bliss"
BASEDIR = '/mnt/pub/movies'
RT_KEY = 'X'
FB_KEY = 'Y'

couch = couchdb.Server()
if not DB in couch:
    db = couch.create(DB)
    movie_views = {
        "_id": "_design/movie",
        "language": "python",
        "views": {
            "all": {
                "map":
Example no. 9
from swap.SwapInterface import SwapInterface
from swap.protocol.SwapDefs import SwapState
from swap.xmltools.XmlSettings import XmlSettings
from MQTT import MQTT
import sys

from nyamuk import nyamuk
import nyamuk.nyamuk_const as NC
import json

import logging
import random

logger = logging.getLogger('lib_temp')
hdlr = logging.FileHandler('/var/log/lib_temp.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr) 
logger.setLevel(logging.INFO)

class SwapManager(SwapInterface):
    """
    SWAP Management Class
    """
    def newMoteDetected(self, mote):
        """
        New mote detected by SWAP server
        
        @param mote: Mote detected
        *****************************
Example no. 10
      return response.text(log_text)

@app.delete('/delete/<name>')
async def delete_one(req,name):
  async with aiosqlite3.connect(app.config.DB_NAME) as db:
    async with db.execute("delete from user where username=?", (name,)):
      await db.commit()
      log_text=f'finish delete from table: name: {name}'
      logger.info(log_text)
      return response.text(log_text)

@app.delete('/deleteall')
async def delete_all(req):
  async with aiosqlite3.connect(app.config.DB_NAME) as db:
    await db.execute("delete from user")
    await db.commit()
    log_text='finish delete all tuples in table'
    logger.info(log_text)
    return response.text(log_text)

if __name__ == "__main__":
  # logging
  logger = logging.getLogger(__name__)
  logger.setLevel(level=logging.DEBUG)
  handler = logging.FileHandler('remote_rest.log')
  formatter = logging.Formatter('%(asctime)s %(levelname).1s %(lineno)-3d %(funcName)-20s %(message)s')
  handler.setFormatter(formatter)
  logger.addHandler(handler)
  app.run(host="0.0.0.0", port=8891)

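Once this service is running (it listens on 0.0.0.0:8891 as configured above), the delete endpoints can be exercised from any HTTP client. A small illustrative check using requests; the hostname and username are placeholders:

import requests

# delete a single user, then everything (placeholder host/user)
print(requests.delete("http://localhost:8891/delete/alice").text)
print(requests.delete("http://localhost:8891/deleteall").text)
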
Example no. 11
import numpy as np
import io
import torch
import logging
from datetime import datetime
import pickle
from core.genetic_algorithm import get_action, Population
from core.utils import do_action, do_action_multiple, fitness_calc
from pyboy import PyBoy
from multiprocessing import Pool, cpu_count

# logging information
logger = logging.getLogger('mario')
logger.setLevel(logging.INFO)

fh = logging.FileHandler('logs.out')
fh.setLevel(logging.INFO)
logger.addHandler(fh)

ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)

epochs = 50
population = None
run_per_child = 1
max_fitness = 0
pop_size = 1
max_score = 999999
#n_workers = cpu_count()
n_workers = 10
Example no. 12
    'max_messages': 15000
}

bot = commands.AutoShardedBot(**bot_config)
bot.remove_command('help')

bot.message_cache = MessageCache(bot)
bot.refresh_blocked = {}
bot.refresh_queue = {}

# logger
# create the 'discord' logger
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
# create file handler which logs info and above
fh = logging.FileHandler('pollmaster.log',  encoding='utf-8', mode='w')
fh.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)

extensions = ['cogs.config', 'cogs.poll_controls', 'cogs.help', 'cogs.db_api', 'cogs.admin']
for ext in extensions:
    bot.load_extension(ext)
Example no. 13
    log_name = '%s_%d.log' % (dt.strftime('%H-%M-%S'),
                              threading.current_thread().ident)
    return os.path.join(dir_root, log_name)


logger = logging.getLogger('qt4a')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(OutStream(sys.stdout)))
fmt = logging.Formatter(
    '%(asctime)s %(thread)d %(message)s')  # %(filename)s %(funcName)s
logger.handlers[0].setFormatter(fmt)
logger.handlers[0].setLevel(logging.WARNING)  # console log level is WARNING
# logger.addHandler(logging.StreamHandler(sys.stderr))

logger_path = gen_log_path()
file_handler = logging.FileHandler(logger_path)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(thread)d %(message)s'
                        )  # %(filename)s %(funcName)s
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)


def clear_logger_file():
    '''Clear the log file
    '''
    if logger_path:
        try:
            f = open(logger_path, 'w')
            f.write('')
            f.close()
        except:
Example no. 14
from scrapy.exceptions import DropItem
import logging
import pymongo
import json
import os
from . import settings
# for mysql
import pymysql.cursors

from TweetScraper.items import SearchItem, ProfileItem
from TweetScraper.utils import mkdirs

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("insert_error.log")
fh.setLevel(logging.ERROR)
logger.addHandler(fh)


class SavetoMySQLPipeline(object):
    ''' pipeline that saves data to MySQL '''
    def __init__(self):
        # connect to mysql server
        self.connect = pymysql.Connect(host=settings.HOST,
                                       port=settings.PORT,
                                       user=settings.USER,
                                       passwd=settings.PASSWORD,
                                       db=settings.DB,
                                       sql_mode='')
        self.cursor = self.connect.cursor()
Example no. 15
    logging.info('using InteractiveBrokers flex file "{}"'.format(full_flex_path))
    with open(full_flex_path, 'r') as ibrokers_response_file:
        ibrokers_response = ibrokers_response_file.read()
        accounts = parse_flex_accounts(ibrokers_response)
        secrets_file_path = os.path.abspath(args.file_secret)
        logging.info('using secrets file "{}"'.format(secrets_file_path))
        with open(secrets_file_path) as json_data:
            secrets_content = json.load(json_data)
            google_credential = secrets_content['google.credential']
            authorized_http, credentials = gservices.authorize_services(google_credential)
            svc_sheet = gservices.create_service_sheets(credentials)
            google_sheet_nav_id = config['google.sheet.navs.id']
            upload_navs(accounts, google_sheet_nav_id, svc_sheet)

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
    logging.getLogger('requests').setLevel(logging.WARNING)
    file_handler = logging.FileHandler('update-nav-hist.log', mode='w')
    formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
    file_handler.setFormatter(formatter)
    logging.getLogger().addHandler(file_handler)
    parser = argparse.ArgumentParser(description='NAV history update.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter
                                     )
    parser.add_argument('--file-ibrokers-flex', type=str, help='InteractiveBrokers Flex response')
    parser.add_argument('--file-secret', type=str, help='file including secret connection data', default='secrets.json')
    parser.add_argument('--config', type=str, help='configuration file', default='config.json')

    args = parser.parse_args()
    main(args)
Example no. 16
from flask import Flask, request
from flask_cors import CORS
from thermalprinter import ThermalPrinter

import logging
#import auxiliary_module

# logger
logger = logging.getLogger('keyswitch_tester')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('../thermal_printer.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s, %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)

# params
printer_port = '/dev/tty.usbserial-A601E88T'
baudrate = 9600
flask_host = '0.0.0.0'
flask_port = 5000
doLogging = True

# start printer
#TODO error if cannot connect to the printer
printer = ThermalPrinter(port=printer_port, baudrate=baudrate)

app = Flask(__name__)
CORS(app)

Example no. 17
def main() -> None:
    """Handle command line arguments and call other modules as needed."""

    p = poster.Poster()
    drawers = {
        "grid": grid_drawer.GridDrawer(p),
        "calendar": calendar_drawer.CalendarDrawer(p),
        "heatmap": heatmap_drawer.HeatmapDrawer(p),
        "circular": circular_drawer.CircularDrawer(p),
        "github": github_drawer.GithubDrawer(p),
    }

    args_parser = argparse.ArgumentParser(prog=__app_name__)
    args_parser.add_argument(
        "--gpx-dir",
        dest="gpx_dir",
        metavar="DIR",
        type=str,
        default=".",
        help="Directory containing GPX files (default: current directory).",
    )
    args_parser.add_argument(
        "--output",
        metavar="FILE",
        type=str,
        default="poster.svg",
        help='Name of generated SVG image file (default: "poster.svg").',
    )
    args_parser.add_argument(
        "--language",
        metavar="LANGUAGE",
        type=str,
        default="",
        help="Language (default: english).",
    )
    args_parser.add_argument(
        "--localedir",
        metavar="DIR",
        type=str,
        help=
        "The directory where the translation files can be found (default: the system's locale directory).",
    )
    args_parser.add_argument(
        "--year",
        metavar="YEAR",
        type=str,
        default="all",
        help=
        'Filter tracks by year; "NUM", "NUM-NUM", "all" (default: all years)',
    )
    args_parser.add_argument("--title",
                             metavar="TITLE",
                             type=str,
                             help="Title to display.")
    args_parser.add_argument(
        "--athlete",
        metavar="NAME",
        type=str,
        default="John Doe",
        help='Athlete name to display (default: "John Doe").',
    )
    args_parser.add_argument(
        "--special",
        metavar="FILE",
        action="append",
        default=[],
        help=
        "Mark track file from the GPX directory as special; use multiple times to mark "
        "multiple tracks.",
    )
    types = '", "'.join(drawers.keys())
    args_parser.add_argument(
        "--type",
        metavar="TYPE",
        default="grid",
        choices=drawers.keys(),
        help=
        f'Type of poster to create (default: "grid", available: "{types}").',
    )
    args_parser.add_argument(
        "--background-color",
        dest="background_color",
        metavar="COLOR",
        type=str,
        default="#222222",
        help='Background color of poster (default: "#222222").',
    )
    args_parser.add_argument(
        "--track-color",
        dest="track_color",
        metavar="COLOR",
        type=str,
        default="#4DD2FF",
        help='Color of tracks (default: "#4DD2FF").',
    )
    args_parser.add_argument(
        "--track-color2",
        dest="track_color2",
        metavar="COLOR",
        type=str,
        help="Secondary color of tracks (default: none).",
    )
    args_parser.add_argument(
        "--text-color",
        dest="text_color",
        metavar="COLOR",
        type=str,
        default="#FFFFFF",
        help='Color of text (default: "#FFFFFF").',
    )
    args_parser.add_argument(
        "--special-color",
        dest="special_color",
        metavar="COLOR",
        default="#FFFF00",
        help='Special track color (default: "#FFFF00").',
    )
    args_parser.add_argument(
        "--special-color2",
        dest="special_color2",
        metavar="COLOR",
        help="Secondary color of special tracks (default: none).",
    )
    args_parser.add_argument(
        "--units",
        dest="units",
        metavar="UNITS",
        type=str,
        choices=["metric", "imperial"],
        default="metric",
        help='Distance units; "metric", "imperial" (default: "metric").',
    )
    args_parser.add_argument(
        "--clear-cache",
        dest="clear_cache",
        action="store_true",
        help="Clear the track cache.",
    )
    args_parser.add_argument(
        "--workers",
        dest="workers",
        metavar="NUMBER_OF_WORKERS",
        type=int,
        help=
        "Number of parallel track loading workers (default: number of CPU cores)",
    )
    args_parser.add_argument(
        "--from-strava",
        dest="from_strava",
        metavar="FILE",
        type=str,
        help="JSON file containing config used to get activities from strava",
    )
    args_parser.add_argument("--verbose",
                             dest="verbose",
                             action="store_true",
                             help="Verbose logging.")
    args_parser.add_argument("--logfile",
                             dest="logfile",
                             metavar="FILE",
                             type=str)
    args_parser.add_argument(
        "--special-distance",
        dest="special_distance",
        metavar="DISTANCE",
        type=float,
        default=10.0,
        help="Special Distance1 by km and color with the special_color",
    )
    args_parser.add_argument(
        "--special-distance2",
        dest="special_distance2",
        metavar="DISTANCE",
        type=float,
        default=20.0,
        help="Special Distance2 by km and color with the special_color2",
    )
    args_parser.add_argument(
        "--min-distance",
        dest="min_distance",
        metavar="DISTANCE",
        type=float,
        default=1.0,
        help="min distance by km for track filter",
    )
    args_parser.add_argument(
        "--with-animation",
        dest="with_animation",
        action="store_true",
        help="If the `poster` contains animation or not",
    )
    args_parser.add_argument(
        "--animation-time",
        dest="animation_time",
        type=int,
        default=30,
        help="Animation show time",
    )

    for _, drawer in drawers.items():
        drawer.create_args(args_parser)

    args = args_parser.parse_args()

    for _, drawer in drawers.items():
        drawer.fetch_args(args)

    log = logging.getLogger("gpxtrackposter")
    log.setLevel(logging.INFO if args.verbose else logging.ERROR)
    if args.logfile:
        handler = logging.FileHandler(args.logfile)
        log.addHandler(handler)

    loader = track_loader.TrackLoader(args.workers)
    loader.set_cache_dir(
        os.path.join(appdirs.user_cache_dir(__app_name__, __app_author__),
                     "tracks"))
    if not loader.year_range.parse(args.year):
        raise ParameterError(f"Bad year range: {args.year}.")

    loader.special_file_names = args.special
    loader.set_min_length(args.min_distance * Units().km)
    if args.clear_cache:
        print("Clearing cache...")
        loader.clear_cache()
    if args.from_strava:
        tracks = loader.load_strava_tracks(args.from_strava)
    else:
        tracks = loader.load_tracks(args.gpx_dir)
    if not tracks:
        if not args.clear_cache:
            print("No tracks found.")
        return

    print(
        f"Creating poster of type {args.type} with {len(tracks)} tracks and storing it in file {args.output}..."
    )
    p.set_language(args.language, args.localedir)
    p.set_athlete(args.athlete)
    p.set_title(args.title if args.title else p.translate("MY TRACKS"))
    p.set_with_animation(args.with_animation)
    p.set_animation_time(args.animation_time)

    p.special_distance = {
        "special_distance": args.special_distance * Units().km,
        "special_distance2": args.special_distance2 * Units().km,
    }

    p.colors = {
        "background": args.background_color,
        "track": args.track_color,
        "track2": args.track_color2 or args.track_color,
        "special": args.special_color,
        "special2": args.special_color2 or args.special_color,
        "text": args.text_color,
    }
    p.units = args.units
    p.set_tracks(tracks)
    if args.type == "github":
        p.height = 55 + p.years.count() * 43
    p.draw(drawers[args.type], args.output)
Example no. 18
import discord
from discord.ext import commands
import logging
import json

# global stuff
client = discord.Client()
description = ''' TickTackToe: Play tick tack toe on discord !! '''
bot = commands.Bot(command_prefix='!tick ',
                   pm_help=False,
                   description=description)

# logger stuff
logger = logging.getLogger('discord')
logger.setLevel(logging.CRITICAL)
handler = logging.FileHandler(filename='discord.log',
                              encoding='utf-8',
                              mode='w')
handler.setFormatter(
    logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)

# extensions / cogs
initial_extensions = ['cogs.game']

# remove_command
bot.remove_command("help")


# event handlers
@bot.event
async def on_ready():
Example no. 19
LOG_DIR = getattr(settings, 'LOGDIR', '%slogs' % settings.INSTANCE(''))
if not os.path.isdir(LOG_DIR):
    try:
        os.makedirs(LOG_DIR)
    except:
        print "ERROR! no log!"
        raise

LOG_FILE = '%s/downloader-%s.log' % (LOG_DIR, SOURCE_SLUG)

# Setup logging
logger = logging.getLogger('khan.py')
logger.setLevel(logging.DEBUG)
# Use file output for production logging:
filelog = logging.FileHandler(LOG_FILE, 'w')
filelog.setLevel(logging.INFO)
# Use console for development logging:
conlog = logging.StreamHandler()
conlog.setLevel(logging.DEBUG)
# Specify log formatting:
formatter = logging.Formatter("%(asctime)s L%(lineno)s \
%(levelname)s: %(message)s", "%Y-%m-%d %H:%M")
conlog.setFormatter(formatter)
filelog.setFormatter(formatter)
# Add console log to logger
logger.addHandler(conlog)
logger.addHandler(filelog)

# YOUTUBE INSTANCE
youtube.YouTube()
Example no. 20
import datetime
import logging
import datalink  # universal logins for environment
import math
Flag = 0
CCY1 = "AL"
CCY2 = "GN"
Table = 'ALGN'
yClose = 0

logging.basicConfig(filename='DailyOHLC' + str(datetime.date.today()) + '.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

fh = logging.FileHandler('DailyOHLC' + str(datetime.date.today()) + '.txt')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)

ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.debug('Starting DailyOHLC')


def truncate(f, n):
    '''Truncates/pads a float f to n decimal places without rounding'''
    s = '{}'.format(f)
    if 'e' in s or 'E' in s:
Example no. 21
curs = connmysql.cursor(pymysql.cursors.DictCursor)
# listsql = "select `NAME` from INFO"
listsql = " select NAME from coin.LIST  "
# curs.execute(sql)
curs.execute(listsql)
rows = curs.fetchall()

if __name__ == '__main__':
    for i in rows:
        stock_code = i['NAME']
    # stock_code = '005930'  # Samsung Electronics

        # logging setup
        log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % stock_code)
        timestr = settings.get_time_str()
        file_handler = logging.FileHandler(filename=os.path.join(
            log_dir, "%s_%s.log" % (stock_code, timestr)), encoding='utf-8')
        stream_handler = logging.StreamHandler()
        file_handler.setLevel(logging.DEBUG)
        stream_handler.setLevel(logging.INFO)
        logging.basicConfig(format="%(message)s",
            handlers=[file_handler, stream_handler], level=logging.DEBUG)

        # prepare the stock data
        chart_data = data_manager.load_chart_data(stock_code)
        # chart_data = data_manager.load_chart_data(
        #     os.path.join(settings.BASE_DIR,
        #                  'data/chart_data/{}_rich.csv'.format(stock_code)))
        prep_data = data_manager.preprocess(chart_data)
        training_data = data_manager.build_training_data(prep_data)

        # Filter by date range
Esempio n. 22
0
import logging
import flatdict
import numpy as np

DIR_NAME = '/home/sylver/Projects/env/queryperformance20170717/'
DIR_LOG = DIR_NAME + 'explain/'
DIR_RES = DIR_NAME + 'res/'
OUT_NAME = '/home/sylver/Projects/env/queryperformance20170717/features/5gb_24mpl/'
# Initialze logger
LOG_FILENAME = 'extract_log'
INPUT_FILE = DIR_NAME + 'run_records/yes'

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

handler = logging.FileHandler(LOG_FILENAME)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)


def extract_single(d_origin, d, flat_data):
    for (key, value) in flat_data.items():
        if key.endswith('Node Type'):
            newkey = key[:key.find('Node Type')]
            d_origin[newkey] = value

    for (out_key, out_value) in d_origin.items():
        item = dict()
        for (in_key, in_value) in flat_data.items():
            pos = in_key.rfind(':') + 1
            if (out_key == in_key[:pos]):
                item[in_key[pos:]] = in_value
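
# For context, a hedged sketch of the flatdict behaviour extract_single()
# relies on (the nested plan below is made up for illustration):
if __name__ == '__main__':
    plan = {'Plan': {'Node Type': 'Seq Scan', 'Total Cost': 155.0}}
    flat_data = flatdict.FlatDict(plan, delimiter=':')
    for key, value in flat_data.items():
        print(key, '->', value)   # e.g. Plan:Node Type -> Seq Scan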
Esempio n. 23
0
import logging
import os
import sys

import RPi.GPIO as GPIO

sys.path.append('/home/pi/Attendance/Direct')
sys.path.append('/home/pi/Attendance/RPA')

from goout import goout
from gowork import gowork 
from leavework import leavework
from returnwork import returnwork

from goRPA import goRPA
from outRPA import outRPA 

logger = logging.getLogger('Logging')
logger.setLevel(logging.DEBUG)  # 10 == logging.DEBUG
fh = logging.FileHandler('/home/pi/Attendance/log.log')
logger.addHandler(fh)
sh = logging.StreamHandler()
logger.addHandler(sh)

def white_callback(channel):
    print(channel)
    if channel==23:
        print("black callback")
    if GPIO.input(24)==GPIO.HIGH:
        print('shutdown')

			
def black_callback(channel):
    print(channel)
    if channel==24:
Esempio n. 24
0
File: 02.py Progetto: NieHugh/Python
import logging
import logging.handlers
import datetime


# Define the logger

logger = logging.getLogger('mylogger')
logger.setLevel(logging.DEBUG)


# Set up two different handlers for two different files
rf_handler = logging.handlers.TimedRotatingFileHandler(
    'all.log', when='midnight', interval=1, backupCount=7,
    atTime=datetime.time(0, 0, 0, 0))
rf_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))


f_handler = logging.FileHandler('error.log')
f_handler.setLevel(logging.ERROR)
f_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s"))


# Attach the handlers to the logger
logger.addHandler(rf_handler)
logger.addHandler(f_handler)



logger.debug('debug message')
logger.info('info message')
logger.warning('warning message')
logger.error('error message')
Esempio n. 25
0
def enable_logging():
    out_hdlr = logging.FileHandler('leetcode-vim.log')
    out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    out_hdlr.setLevel(logging.INFO)
    log.addHandler(out_hdlr)
    log.setLevel(logging.INFO)
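
# Hedged usage sketch (added; the logger name is an assumption): the function
# above relies on a module-level logger called `log`, e.g.:
import logging

log = logging.getLogger('leetcode')
enable_logging()
log.info('logging enabled')   # written to leetcode-vim.log at INFO level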
Esempio n. 26
0
    def make_log_handler(self):
        self.log_handler = logging.FileHandler(self.logfilename)
        self.log_handler.setFormatter(
            logging.Formatter(fmt="%(levelname)s (%(name)s): %(message)s")
        )
        self.log_handler.setLevel(logging.INFO)
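
# Hedged sketch (added; class and attribute names are assumptions): the method
# above only builds self.log_handler, so its owning class presumably attaches
# the handler to a logger along these lines:
import logging

class FileLogged:
    def __init__(self, logfilename):
        self.logfilename = logfilename
        self.logger = logging.getLogger(type(self).__name__)
        self.make_log_handler()
        self.logger.addHandler(self.log_handler)

    def make_log_handler(self):
        self.log_handler = logging.FileHandler(self.logfilename)
        self.log_handler.setFormatter(
            logging.Formatter(fmt="%(levelname)s (%(name)s): %(message)s"))
        self.log_handler.setLevel(logging.INFO)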
Esempio n. 27
0
import logging

import boto.s3
import urllib.request
import zipfile
import os
import pandas as pd
import sys
from boto.s3.key import Key
import time
import datetime
import boto
from boto.exception import S3ResponseError

root = logging.getLogger()
root.setLevel(logging.DEBUG)

ch1 = logging.FileHandler('problem2_log.log')  #output the logs to a file
ch1.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch1.setFormatter(formatter)
root.addHandler(ch1)


def download_zip(url):
    zips = []
    zips.append(
        urllib.request.urlretrieve(url,
                                   filename='downloaded_zips/' + url[-15:]))


year = '2010'
urll = "http://www.sec.gov/dera/data/Public-EDGAR-log-file-data/"
Esempio n. 28
0
import logging
import datetime

dtime = datetime.datetime.now()
string = dtime.strftime('%Y%m%d-%H%M%S')

logger = logging.getLogger("loggingtest")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(string + '.log')  # use the timestamp built above
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
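
# added usage check (not part of the original example): with fh at DEBUG and
# ch at ERROR, debug/info records reach only the log file, while errors reach
# both the file and the console
logger.debug('recorded in the log file only')
logger.error('recorded in the log file and echoed to the console')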
Esempio n. 29
0
# invite link required? (1=yes)
is_invite = 1

# wave, useful if script is executed repeatedly
wave = 1

# sleep after each loop (in secs); the timeout helps keep Telegram from flagging us as a bot
sleep = 361


# log: will be saved to the subdirectory /log/

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
logger.addHandler(logging.FileHandler('log/' + logname + '_' + datetime.datetime.now().strftime("%d.%m.%Y_%H.%M") + '.txt', 'a'))
print = logger.info
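# note (added): rebinding print to logger.info routes every print() call below
# through the root logger, i.e. to the console handler set up by basicConfig
# and to the timestamped file handler added above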

# confirm the script is being run intentionally, since SQL content might get removed

continuescript = input('Would you like to continue with this script? (Y/n)')

if continuescript in ['Y', 'y']:
    print("Great, let's continue")
else:
    sys.exit()


time.sleep(5.5)

Esempio n. 30
0
    def _run_function_workflow(self, num_gb, num_threads):
        '''
        Function to run the use_resources() function in a nipype workflow
        and return the runtime stats recorded by the profiler

        Parameters
        ----------
        self : RuntimeProfileTestCase
            a unittest.TestCase-inherited class
        num_gb : float
            number of gigabytes of memory the test function should allocate
        num_threads : int
            number of threads the test function should use

        Returns
        -------
        start_str : string
            the first callback-log line (the node's start record)
        finish_str : string
            a json-compatible dictionary string containing the runtime
            statistics of the nipype node that used system resources
        '''

        # Import packages
        import logging
        import os
        import shutil
        import tempfile

        import nipype.pipeline.engine as pe
        import nipype.interfaces.utility as util
        from nipype.pipeline.plugins.callback_log import log_nodes_cb

        # Init variables
        base_dir = tempfile.mkdtemp()
        log_file = os.path.join(base_dir, 'callback.log')

        # Init logger
        logger = logging.getLogger('callback')
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(log_file)
        logger.addHandler(handler)

        # Declare workflow
        wf = pe.Workflow(name='test_runtime_prof_func')
        wf.base_dir = base_dir

        # Input node
        input_node = pe.Node(util.IdentityInterface(fields=['num_gb',
                                                            'num_threads']),
                             name='input_node')
        input_node.inputs.num_gb = num_gb
        input_node.inputs.num_threads = num_threads

        # Resources used node
        resource_node = pe.Node(util.Function(input_names=['num_threads',
                                                           'num_gb'],
                                              output_names=[],
                                              function=use_resources),
                                name='resource_node')
        resource_node.interface.estimated_memory_gb = num_gb
        resource_node.interface.num_threads = num_threads

        # Connect workflow
        wf.connect(input_node, 'num_gb', resource_node, 'num_gb')
        wf.connect(input_node, 'num_threads', resource_node, 'num_threads')

        # Run workflow
        plugin_args = {'n_procs' : num_threads,
                       'memory' : num_gb,
                       'status_callback' : log_nodes_cb}
        wf.run(plugin='MultiProc', plugin_args=plugin_args)

        # Get runtime stats from log file
        with open(log_file, 'r') as log_handle:
            log_lines = log_handle.readlines()
        start_str = log_lines[0].rstrip('\n')
        finish_str = log_lines[1].rstrip('\n')

        # Delete wf base dir
        shutil.rmtree(base_dir)

        # Return runtime stats
        return start_str, finish_str
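
    # Hedged usage sketch (added): assuming this method lives on the same
    # RuntimeProfileTestCase class and the two callback-log lines are JSON (as
    # the docstring implies), a minimal smoke test could look like:
    def test_function_profile_smoke(self):
        import json

        start_str, finish_str = self._run_function_workflow(num_gb=1, num_threads=2)
        # both callback-log lines should parse into dictionaries describing the run
        self.assertIsInstance(json.loads(start_str), dict)
        self.assertIsInstance(json.loads(finish_str), dict)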