Example 1
def supported_games():
    output = ""
    last_game = list(config)[-1]  # dict views are not indexable in Python 3
    for game in config:
        if game != "fallback":
            output += "\t {game!s}".format(game=game)
            if game != last_game:
                output += "\n"
    return output
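For illustration, a minimal usage sketch; the config contents here are hypothetical:

# Hypothetical config: game names mapped to their settings.
config = {"fallback": {}, "quake": {}, "doom": {}}

print(supported_games())
# Prints "\t quake\n\t doom": one supported game per line, "fallback" skipped.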
Example 2
def create_app_for_manager(config_name=None):
    print("app config name: %s\n" % config_name)
    if config_name is not None:
        if config_name not in config:
            print("Wrong app config name: %s. Using default instead." % config_name)
            config_name = 'default'
    else:
        config_name = 'default'

    app = create_app(config_name)
    return app
Example 3
def create_app(config_name):
    app = Flask(__name__)
    if config_name not in config:
        raise ValueError("Invalid FLASK_CONFIG, choose one of %s" %
                         ', '.join(config))
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    from .api_1_0 import api as api_1_0_blueprint
    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/1.0')

    from .ui import ui
    app.register_blueprint(ui)

    return app
Example 4
def create_app(config_name):
    app = Flask(__name__, static_folder='dashboard/dist')
    if config_name not in config:
        raise ValueError("Invalid FLASK_CONFIG, choose one of %s" %
                         ', '.join(config))
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    from .api_1_0 import api as api_1_0_blueprint
    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/1.0')

    from .dashboard import dashboard
    app.register_blueprint(dashboard)

    from .docs import docs
    app.register_blueprint(docs, url_prefix='/docs')

    Bower(app)

    return app
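A common way to drive such a factory, hinted at by the FLASK_CONFIG error message above; the 'default' key is an assumption carried over from Example 2:

import os

# Choose the configuration from the environment, as the error message suggests.
app = create_app(os.environ.get('FLASK_CONFIG', 'default'))

if __name__ == '__main__':
    app.run()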
Example 5
def sub_cb(topic, message):
    print((topic, message))

    topic = topic.decode()
    message = message.decode().strip("\"' ")

    print((topic, message))

    command = topic.split('/')[-1].strip("\"' ")
    print("command: ", command)

    if command == "reboot":
        # Reboot command
        if message == "yes":
            mqttSend("Reboot command received. Rebooting...")
            machine.reset()
        else:
            mqttSend("Reboot command received but the message is not as expected, so it is ignored. Message: " + message)
    elif command == "save":
        if message == "yes":
            mqttSend("Saving current configuration to file.")
            saveConfig()
        else:
            mqttSend("Save command received but the message is not as expected, so it is ignored. Message: " + message)
    # Check whether the received command is a known configuration name
    elif command in config:
        key = command
        newValue = message
        oldValue = config[key]
        if newValue == "":
            mqttSend("{key} = {oldValue}".format(key=key, oldValue=oldValue))
        else:
            config[key] = newValue
            mqttSend("{key} has been updated to {newValue} from {oldValue}".format(key=key, oldValue=oldValue, newValue=newValue))
            saveConfig()
    else:
        # Unknown command
        mqttSend("Unknown command: {command}. Ignored.".format(command=command))
Example 6
def filter(event):
    '''Required filter() function that evaluates the lambda event.

    Evaluate the lambda event data and return:
        - None to abort ec2 execution
        - a list containing True and an optional list of values to
          pass to the user_data
    '''

    print("ec2_lambda received {}".format(event))

    subject = event['Records'][0]['Sns']['Subject']
    message = event['Records'][0]['Sns']['Message']

    # If a test folder is configured, use it instead of the SNS message.
    if 'filter_testfolder' in config:
        message = config['filter_testfolder']

    if not _check_prefix_in_bucket(message, config['custom_s3bucket']):
        return None

    if subject == 'TeslaCam Upload':
        return [message]
    else:
        print("Subject did not match 'TeslaCam Upload'")
        return None
Example 7
# Packages
import yaml
import boto3
# For pushover
import pushover
import os
# for identifying video file created
import re
from botocore.exceptions import ClientError

import time

from config import config  # assumed import, as in the other examples here


# Force a few defaults in case the values aren't specified
for key in ['custom_pushover_token', 'custom_pushover_key']:
    if key not in config:
        config[key] = None

# The functions below should be entirely rewritten based upon the
# particular lambda deployment and use case.
#
#    filter(): evaluate the event data passed in to the lambda to
#    1) determine if the ec2 should launch and 2) return back a list
#    of values that can be used to format the ec2 user_data.
#    Return: None if the ec2 should not launch, or a list of values to
#    use as substitutions in user_data
#
#    pre_process()
#
#    launched_actions()
#
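The comment block above describes a three-hook contract. A minimal sketch of a module implementing it; only filter()'s return protocol is specified in the source, so the pre_process() and launched_actions() signatures here are assumptions:

def filter(event):
    # Return None to abort the ec2 launch, or a list of values to be
    # substituted into the ec2 user_data.
    records = event.get('Records')
    if not records:
        return None
    return [records[0]['Sns']['Message']]


def pre_process():
    # Placeholder: signature and duties are assumptions, not from the source.
    pass


def launched_actions():
    # Placeholder: signature and duties are assumptions, not from the source.
    pass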
Example 8
from flask import Flask
from models import blueprint_app, blueprint_api  # assumed layout, as in Example 14
from config import config  # assumed import, as in Example 14
import mandrill
import logging
import sys
import os


application = Flask(__name__)
# FIXME set url prefix for celery tasks
application.register_blueprint(blueprint_app)
application.register_blueprint(blueprint_api, url_prefix='/api')

logger = logging.getLogger(__name__)


# update config from yaml dict
for key in config:
    application.config[key] = config[key]

# write logs to a file for production
if config['DEBUG'] is not True:
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('python.log', maxBytes=1024 * 1024 * 100, backupCount=20)
    file_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    file_handler.setFormatter(formatter)
    application.logger.addHandler(file_handler)

# if the upload folder doesn't exist we've just started the application.
if not os.path.exists(config['UPLOAD_FOLDER']):
    logger.error("upload folder didn't exist, creating it now")
    try:
        os.makedirs(config['UPLOAD_FOLDER'])  # assumed completion; the original block is truncated
    except OSError:
        logger.exception("could not create upload folder")
Example 9
from slacker import Slacker
from config import config

API_KEY = None
CHANNEL = None
slacker = None

if 'slack' in config:
    API_KEY = config['slack']['API_KEY']
    CHANNEL = config['slack']['CHANNEL']
    slacker = Slacker(API_KEY)


def post_slack(
    _str=None,
    file_title=None,
    file_path=None,
    image_url=None,
    channel=None,
):
    print('post_slack', _str)
    if slacker is None:
        print('slacker is None')
        return

    if channel is None:
        channel = CHANNEL

    if file_path:
        slacker.files.upload(
            file_path,
            title=file_title,
            channels=channel,  # assumed completion; the original call is truncated
        )
Example 10
def train(device_ids, device):

    # If training stopped suddenly, resume from the last checkpoint.
    if config['resume']:
        state = torch.load(os.path.join(config['@root'], config['@save'],
                                        config['@checkpoint'], config['version'] + '.pth'))

        # The resumed run must use exactly the same config as the checkpoint.
        mismatched = [(key, config[key], state['config'][key])
                      for key in config
                      if key != 'resume' and str(config[key]) != str(state['config'][key])]
        if mismatched:
            print('Error: resume with different config')
            print(mismatched)
            raise ValueError('resume with a different config')

        folds = range(state['fold'], config['#fold']+1)
        epochs = range(state['epoch']+1, config['#epoch']+1)
        logger.debug('Resume {} : starts from Fold[{}/{}] Epoch[{}/{}]'.format(
            config['version'], state['fold'], config['#fold'], state['epoch']+1, config['#epoch']))

    # If not resuming, start fresh.
    else:
        folds = range(1, config['#fold']+1)
        epochs = range(1, config['#epoch']+1)

    # Cross-validation
    fold_best_metric = []
    for fold in folds:

        # Build the datasets for this fold.
        train_loader = Dataloader(split='train', fold=fold)
        val_loader = Dataloader(split='val', fold=fold)

        # Define the model.
        model = nn.DataParallel(config['model'](config['#class']), device_ids=device_ids)
        model.to(device)
        if config['resume'] and fold == folds[0]:
            model.load_state_dict(state['model_state'])

        optimizer = config['optimizer'](model.parameters(), lr=config['lr'])
        if config['resume'] and fold == folds[0]:
            optimizer.load_state_dict(state['optimizer_state'])

        criterion = config['criterion']()
        
        # Check performance of the randomly initialized model as a baseline.
        metric = run(fold=fold, epoch=-1, grad=False, model=model, optimizer=optimizer,
                     criterion=criterion, loader=val_loader, device=device)
        logger.info('Random check {} | Fold[{}/{}] \n       Metric {}'.format(
            ' val ', fold, config['#fold'], metric))

        patience = 0
        best_metric = {key: -1 for key in config['metric']}
        for epoch in epochs:
            # Train for one epoch.
            model, optimizer, metric = run(fold=fold, epoch=epoch, grad=True, model=model, optimizer=optimizer,
                                           criterion=criterion, loader=train_loader, device=device)
            logger.info('{} | Fold[{}/{}] Epoch[{}/{}] \n       Metric {}'.format(
                'train', fold, config['#fold'], epoch, config['#epoch'], metric))

            # Validate.
            metric = run(fold=fold, epoch=epoch, grad=False, model=model, optimizer=optimizer,
                         criterion=criterion, loader=val_loader, device=device)
            logger.info('{} | Fold[{}/{}] Epoch[{}/{}] \n       Metric {}'.format(
                ' val ', fold, config['#fold'], epoch, config['#epoch'], metric))

            # Update the best performance.
            if metric[config['decision_metric']] > best_metric[config['decision_metric']]:
                best_metric = metric
                save_model(checkpoint=False, fold=fold, epoch=epoch, metric=metric, model=model, optimizer=optimizer)
                logger.debug('Fold[{}/{}] Epoch[{}/{}] Model saved'.format(
                    fold, config['#fold'], epoch, config['#epoch']))
                patience = 0
            else:
                patience += 1
                # Early stopping.
                if patience == config['patience']:
                    logger.debug('Stopped early at Epoch {}'.format(epoch))
                    break
            if epoch % config['checkpoint_term'] == 0:
                save_model(checkpoint=True, fold=fold, epoch=epoch, metric=metric, model=model, optimizer=optimizer)

        if config['#fold'] == 2:
            break

        if config['resume']:
            epochs = range(1, config['#epoch']+1)

        fold_best_metric.append(best_metric)

    logger.info(' -------------------- END OF TRAIN -------------------- ')

    # Print the average of the per-fold best metrics.
    for key in config['metric']:
        avg_metric = sum(best_metric[key] for best_metric in fold_best_metric) / len(fold_best_metric)
        logger.info('{}   | Avg {} : {}'.format('val', key, avg_metric))

    # Test with the best saved model from each fold.
    if config['test']:
        test_loader = Dataloader(split='test', fold=0)
        metric_list = []
        for fold in range(1, config['#fold']+1):
            path = os.path.join(config['@root'], config['@save'], config['@best_model'], config['version'], str(fold)+'.pth')
            state = torch.load(path)
            model.load_state_dict(state['model_state'])
            optimizer.load_state_dict(state['optimizer_state'])
            metric = run(fold=fold, epoch=epoch, grad=False, model=model, optimizer=optimizer,
                         criterion=criterion, loader=test_loader, device=device)
            logger.info('{} | Fold[{}/{}] \n       Metric {}'.format(
                'test ', fold, config['#fold'], metric))
            metric_list.append(metric)
        for key in config['metric']:
            avg_metric = sum(metric[key] for metric in metric_list) / len(metric_list)
            logger.info('{}  | Avg {} : {}'.format('test', key, avg_metric))
Example 11
import time
import os
import logging
from lib.diskmapper import DiskMapper
from config import config

logger = logging.getLogger('disk_mapper_daemon')
hdlr = logging.FileHandler('/var/log/disk_mapper.log')
formatter = logging.Formatter('%(asctime)s %(process)d %(thread)d %(filename)s %(lineno)d %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)

logger.setLevel(logging.INFO)
poll_interval = 5

if "params" in config.keys():
    if "log_level" in config["params"].keys():
        log_level = config["params"]["log_level"]
        if log_level == "info":
            logger.setLevel(logging.INFO)
        elif log_level == "error":
            logger.setLevel(logging.ERROR)
        elif log_level == "debug":
            logger.setLevel(logging.DEBUG)

    if poll_interval in config["params"].keys():
        poll_interval = config["params"]["poll_interval"]

def is_daemon_stopped():
    # The lock file's absence signals that the daemon should stop (assumed
    # completion; the original function is truncated here).
    if not os.path.exists("/var/run/disk_mapper.lock"):
        logger.info("=== Disk Mapper Stopped ===")
        return True
    return False
Example 12
def run(bookname, url):
    book = {}
    normal_url = url
    while True:
        logging.info(url)
        title, content, next_url = getall(url)
        print(next_url)
        # If everything looks normal, remember this page as the last good one.
        if content is not None and next_url is not None:
            normal_url = url

        # If there is no next page, fall back to the last good page;
        # otherwise continue to the next page.
        if next_url is None:
            url = normal_url
        # The next page may be an index page; guard against an index page
        # that contains a "next chapter" link.
        elif next_url.find('index') > 0:
            url = normal_url
        else:
            url = next_url

        if content is not None:
            book = addchapter(book, bookname, title, content)
            for _CreatTime, _Title, _Content in book[bookname]:
                with open('a.txt', 'ab') as f:
                    f.write(_Content.encode('gbk'))
                print(_Title)
        # If we could not advance, record the position and stop.
        if url == normal_url:
            config[bookname] = [url]
            yamlwrite(config)
            return


for bookname in config:
    for url in config[bookname]:
        run(bookname, url)
Example 13
import logging
import httplib
import base64
import subprocess
import fcntl
from signal import SIGSTOP, SIGCONT
from config import config
from cgi import parse_qs

logger = logging.getLogger('disk_mapper')
hdlr = logging.FileHandler('/var/log/disk_mapper.log')
formatter = logging.Formatter('%(asctime)s %(process)d %(thread)d %(filename)s %(lineno)d %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)

logger.setLevel(logging.INFO)
if "params" in config.keys() and "log_level" in config["params"].keys():
	log_level = config["params"]["log_level"]
	if log_level == "info":
		logger.setLevel(logging.INFO)
	elif log_level == "error":
		logger.setLevel(logging.ERROR)
	elif log_level == "debug":
		logger.setLevel(logging.DEBUG)

def acquire_lock(lock_file):
    lockfd = open(lock_file, 'w')
    fcntl.flock(lockfd.fileno(), fcntl.LOCK_EX)
    return lockfd

def release_lock(fd):
    fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
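For illustration, the two helpers pair up around a critical section like this; the lock-file path is borrowed from Example 11 and is otherwise an assumption:

fd = acquire_lock("/var/run/disk_mapper.lock")
try:
    pass  # ... operate on shared disk-mapper state here ...
finally:
    release_lock(fd)
    fd.close()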
Example 14
from flask import Flask
from models import blueprint_app, blueprint_api  # blueprint_api location assumed
from config import config
import mandrill
import logging
import sys
import os

application = Flask(__name__)
# FIXME set url prefix for celery tasks
application.register_blueprint(blueprint_app)
application.register_blueprint(blueprint_api, url_prefix='/api')

logger = logging.getLogger(__name__)

# update config from yaml dict
for key in config:
    application.config[key] = config[key]

# write logs to a file for production
if config['DEBUG'] is not True:
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('python.log',
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=20)
    file_handler.setLevel(logging.ERROR)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    file_handler.setFormatter(formatter)
    application.logger.addHandler(file_handler)

# if the upload folder doesn't exist we've just started the application.