Example #1
def main(argv):
    """
    Entry point for etl module.
    """
    option_parser = optparse.OptionParser(usage=DEFAULT_USAGE_TEXT)
    option_parser.add_option("-c", "--config", dest="config",
                             default="config.cfg", help="Configuration file")
    option_parser.add_option("-v", "--verbose", dest="verbose",
                             action="store_true", default=False,
                             help="Show verbose output")
    options, _ = option_parser.parse_args(argv)

    if not os.path.exists(options.config):
        sys.stderr.write("ERROR: {} does not exist\n".format(options.config))
        option_parser.print_help()
        return 1
    config = read_config(options.config)

    log_dir = config['general']['log_dir']
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    filename = os.path.join(log_dir, __file__.replace(".py", ".log"))
    setup_logger(filename, options.verbose)
    logging.debug("config={}".format(json.dumps(config, indent=2)))

    retcode = run_etl(config)
    return retcode
Example #2
def main():
    args = _get_args()
    mkdirp(conf.get_opt("session_dir"), exit_on_err=True)

    if args.mode == "list":
        session_dir = conf.get_opt("session_dir")
    else:
        session_dir = os.path.join(conf.get_opt("session_dir"),
                                   args.session)

    if not os.path.exists(session_dir) and args.mode not in ["create", "list"]:
        fail("Invalid session %s" % args.session)

    vol_dir = os.path.join(session_dir, args.volume)
    if not os.path.exists(vol_dir) and args.mode not in ["create", "list"]:
        fail("Session %s not created with volume %s" %
            (args.session, args.volume))

    mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume,
                            "cli.log")
    setup_logger(logger, log_file, args.debug)

    # globals() will have all the functions already defined.
    # mode_<args.mode> will be the function name to be called
    globals()["mode_" + args.mode](session_dir, args)
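The comment at the end of Example #2 describes a name-based dispatch: each sub-command is implemented as a plain function named mode_<mode> and resolved through globals(). Below is a minimal sketch of that pattern; the mode_* handlers and the dispatch() helper are hypothetical stand-ins, not the project's own code.

# Minimal sketch of the globals()-based dispatch idea from Example #2.
def mode_create(session_dir, args):
    print("create", session_dir)

def mode_list(session_dir, args):
    print("list", session_dir)

def dispatch(mode, session_dir, args=None):
    # Look up the handler by name; fail cleanly for unknown modes.
    handler = globals().get("mode_" + mode)
    if handler is None:
        raise SystemExit("Unknown mode: %s" % mode)
    handler(session_dir, args)

dispatch("list", "/var/lib/sessions")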
Example #3
def main():
    global gtmpfilename

    args = None

    try:
        args = _get_args()
        mkdirp(conf.get_opt("session_dir"), exit_on_err=True)

        # force the default session name if mode is "query"
        if args.mode == "query":
            args.session = "default"

        if args.mode == "list":
            session_dir = conf.get_opt("session_dir")
        else:
            session_dir = os.path.join(conf.get_opt("session_dir"),
                                       args.session)

        if not os.path.exists(session_dir) and \
                args.mode not in ["create", "list", "query"]:
            fail("Invalid session %s" % args.session)

        # "default" is a system defined session name
        if args.mode in ["create", "post", "pre", "delete"] and \
                args.session == "default":
            fail("Invalid session %s" % args.session)

        vol_dir = os.path.join(session_dir, args.volume)
        if not os.path.exists(vol_dir) and args.mode not in \
                ["create", "list", "query"]:
            fail("Session %s not created with volume %s" %
                 (args.session, args.volume))

        mkdirp(os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume),
               exit_on_err=True)
        log_file = os.path.join(conf.get_opt("log_dir"),
                                args.session,
                                args.volume,
                                "cli.log")
        setup_logger(logger, log_file, args.debug)

        # globals() will have all the functions already defined.
        # mode_<args.mode> will be the function name to be called
        globals()["mode_" + args.mode](session_dir, args)
    except KeyboardInterrupt:
        if args is not None:
            if args.mode == "pre" or args.mode == "query":
                # cleanup session
                if gtmpfilename is not None:
                    # no more interrupts until we clean up
                    signal.signal(signal.SIGINT, signal.SIG_IGN)
                    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

        # Interrupted, exit with non zero error code
        sys.exit(2)
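Example #3 wraps the whole run in a KeyboardInterrupt handler and ignores further SIGINTs while it cleans up, then exits with a non-zero code. A short self-contained sketch of that pattern follows; cleanup() is a hypothetical placeholder for run_cmd_nodes("cleanup", ...).

# Minimal sketch of the interrupt-handling pattern from Example #3.
import signal
import sys

def cleanup():
    print("removing temporary session files")

def run():
    try:
        input("working, press Ctrl-C to interrupt... ")
    except KeyboardInterrupt:
        signal.signal(signal.SIGINT, signal.SIG_IGN)  # no more interrupts until cleanup is done
        cleanup()
        sys.exit(2)

if __name__ == "__main__":
    run()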
Example #4
def init_event_server():
    utils.setup_logger()

    # Delete Socket file if Exists
    try:
        os.unlink(SERVER_ADDRESS)
    except OSError:
        if os.path.exists(SERVER_ADDRESS):
            print ("Failed to cleanup socket file {0}".format(SERVER_ADDRESS),
                   file=sys.stderr)
            sys.exit(1)

    utils.load_all()

    # Start the Eventing Server, UNIX DOMAIN SOCKET Server
    GlusterEventsServer()
    asyncore.loop()
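Example #4 removes a stale UNIX domain socket file before starting the server. A minimal sketch of that cleanup-then-bind pattern is below; SOCK_PATH is a hypothetical path, and the errno check is an equivalent alternative to the os.path.exists() test used above.

# Minimal sketch of the stale-socket cleanup shown in Example #4.
import errno
import os
import socket

SOCK_PATH = "/tmp/example.sock"  # hypothetical socket path

try:
    os.unlink(SOCK_PATH)
except OSError as e:
    if e.errno != errno.ENOENT:  # only "no such file" is acceptable here
        raise

server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(SOCK_PATH)
server.listen(1)
print("listening on", SOCK_PATH)
server.close()
os.unlink(SOCK_PATH)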
Example #5
def init_event_server():
    utils.setup_logger()
    utils.load_all()

    port = utils.get_config("port")
    if port is None:
        sys.stderr.write("Unable to get Port details from Config\n")
        sys.exit(1)

    # Start the Eventing Server, UDP Server
    try:
        server = SocketServer.ThreadingUDPServer(
            (SERVER_ADDRESS, port),
            GlusterEventsRequestHandler)
    except socket.error as e:
        sys.stderr.write("Failed to start Eventsd: {0}\n".format(e))
        sys.exit(1)
    server.serve_forever()
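Example #5 is the UDP variant: a threading UDP server bound to a configured port, exiting if the socket cannot be created. Here is a small runnable sketch of the same pattern using the Python 3 module name socketserver; the echo handler is hypothetical, not the project's GlusterEventsRequestHandler.

# Minimal sketch of the ThreadingUDPServer pattern from Example #5.
import socketserver

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        data, sock = self.request  # for UDP: (payload bytes, server socket)
        sock.sendto(data, self.client_address)

if __name__ == "__main__":
    try:
        server = socketserver.ThreadingUDPServer(("127.0.0.1", 0), EchoHandler)
    except OSError as e:  # socket.error is an alias of OSError in Python 3
        raise SystemExit("Failed to start server: %s" % e)
    print("listening on port", server.server_address[1])
    server.serve_forever()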
Example #6
def mode_cleanup(args):
    working_dir = os.path.join(conf.get_opt("working_dir"),
                               args.session,
                               args.volume)

    mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume,
                            "changelog.log")

    setup_logger(logger, log_file)

    try:
        shutil.rmtree(working_dir, onerror=handle_rm_error)
    except (OSError, IOError) as e:
        logger.error("Failed to delete working directory: %s" % e)
        sys.exit(1)
Example #7
    def __init__(self):
        print "starting server"
        self.key = '' # generate public/private key
        self.peers = {}
        self.p2pfiles = []  # list of P2PFile
        self.server_id = os.urandom(8).encode('hex')
        self.max_peer_sem = threading.Semaphore(MAX_PEERS)   # This is to control shard serving requests
        self._load_files()  # load metadata and create dirs
        self.logger = utils.setup_logger(LOG_FILE)  # setup logger
        self._load_keys()   # load publickey

        self.heartbeat_thrd = threading.Timer(HEARTBEAT_TIMEOUT, self.check_clients) #Thread to monitor alive peers
        self.heartbeat_thrd.setDaemon(True)
        self.heartbeat_thrd.start()
Example #8
    def __init__(self, ec2, ec2_client, tag_base_name, **kwargs):
        """Constructor

        Args:
            ec2 (object): Aws Ec2 session
            ec2_client (object): Aws ec2 session
            tag_base_name (string): Tag base name
            **kwargs: Multiple arguments

        Raises:
            TypeError: Description
        """
        BaseResources.__init__(self, ec2, ec2_client, tag_base_name)
        log_level = kwargs.pop("log_level", logging.WARNING)
        boto_log_level = kwargs.pop("boto_log_level", logging.WARNING)

        if kwargs:
            raise TypeError("Unexpected **kwargs: %r" % kwargs)
        self.logger = setup_logger(__name__, log_level, boto_log_level)
Example #9
def main():
    parser = argparse.ArgumentParser(
        description='pyrasite - inject code into a running python process',
        epilog="For updates, visit https://github.com/lmacken/pyrasite"
        )
    parser.add_argument('pid',
                        help="The ID of the process to inject code into")
    parser.add_argument('filename',
                        help="The second argument must be a filename")
    parser.add_argument('--gdb-prefix', dest='gdb_prefix',
                        help='GDB prefix (if specified during installation)',
                        default="")
    parser.add_argument('--verbose', dest='verbose', help='Verbose mode',
                        default=False, action='store_const', const=True)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    log = setup_logger()

    try:
        pid = int(args.pid)
    except ValueError:
        log.error("Error: The first argument must be a pid")
        sys.exit(2)

    filename = args.filename
    if filename:
        if not os.path.exists(filename):
            log.error("Error: Invalid path or file doesn't exist")
            sys.exit(3)
    else:
        log.error("Error: The second argument must be a filename")
        sys.exit(4)

    injector = CodeInjector(pid, verbose=args.verbose,
                            gdb_prefix=args.gdb_prefix)
    injector.inject(filename)
Example #10
    def __init__(self):
        print "Client running at: %s" % str(LOCAL_ADDRESS)
        self.server_key = '' # server's public key
        self.current_peers = {} # current active peers
        self.peer_times = {}
        self.p2pfiles = []  # list of p2p files
        self.shards = []    # list of shards 
        self.peer_id = None
        self.max_peer_sem = threading.Semaphore(MAX_PEERS)   # This is to control shard serving requests
        self._load_files()
        self.logger = utils.setup_logger(LOG_FILE)
        self._reg_with_server()

        self.ping_thrd = threading.Timer(1.0, self._ping_server_thread)
        self.ping_thrd.setDaemon(True)
        self.ping_thrd.start()
        
        self.peer_thrd = threading.Timer(1.0, self._peer_contact_thread)
        self.peer_thrd.setDaemon(True)
        self.peer_thrd.start()

        self.timeout_thrd = threading.Timer(1.0, self._peer_timeout_thread)
        self.timeout_thrd.setDaemon(True)
        self.timeout_thrd.start()
Example #11
                        default=".")
    parser.add_argument("-N",
                        "--only-namespace-changes",
                        help="List only namespace changes",
                        action="store_true")

    return parser.parse_args()


if __name__ == "__main__":
    args = _get_args()
    mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"), args.session, args.volume,
                            "changelog.log")
    setup_logger(logger, log_file, args.debug)

    session_dir = os.path.join(conf.get_opt("session_dir"), args.session)
    status_file = os.path.join(session_dir, args.volume,
                               "%s.status" % urllib.quote_plus(args.brick))
    status_file_pre = status_file + ".pre"
    mkdirp(os.path.join(session_dir, args.volume),
           exit_on_err=True,
           logger=logger)

    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except (ValueError, OSError, IOError):
        start = args.start
Example #12
# path to store the doc id and token auth
STORAGE_PATH = os.path.join(os.path.expanduser('~'), '.gdoc_shell')
FILE_ID_PATH = os.path.join(STORAGE_PATH, 'fid')
TOKEN_PATH = os.path.join(STORAGE_PATH, 'token')

# drive params
# using drive.file since it only creates one file
SCOPES = ['https://www.googleapis.com/auth/drive.file']
DOC_MIMETYPE = 'application/vnd.google-apps.document'

# msgs output table
OUTPUT_INVALID_CMD = 'Command is not in the list of valid commands. Please add it to the config if you want to use it.'
OUTPUT_NO_OUTPUT = 'No output'

logger, handler = utils.setup_logger(__name__)

DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'


def _get_http_client(cred_file):
    """Uses project credentials (if exists) along with requested OAuth2 scopes.

    Based from this example:
    https://github.com/gsuitedevs/python-samples/blob/master/docs/mail-merge/docs_mail_merge.py
    """
    store = file.Storage(TOKEN_PATH)
    cred = store.get()
    if not cred or cred.invalid:
        flow = client.flow_from_clientsecrets(cred_file, SCOPES)
        cred = tools.run_flow(flow, store)
Example #13
    num_col = [col for col in df.columns if df[col].dtype != 'object' and col not in config.unused]
    not_num_col = [col for col in df.columns if col not in num_col and col not in config.unused]

    df = pd.get_dummies(df, columns=not_num_col, dummy_na=True)

    logger.info('Handling Missing Values')
    for col in df.columns:
        if col in num_col:
            df[col].fillna(df[col].mean(), inplace=True)
            if config.SCALING:
                sc = StandardScaler()
                df[col] = sc.fit_transform(df[col].values.reshape(-1, 1))

    logger.info('application shape:{0}'.format(df.shape))
    logger.info('Save data to directory {0}'.format(save_path))

    train = df[~df['TARGET'].isnull()]
    test = df[df['TARGET'].isnull()].drop(['TARGET'], axis=1)
    train.to_pickle(os.path.join(save_path, 'application_train.pickle'))
    test.to_pickle(os.path.join(save_path, 'application_test.pickle'))

    logger.info('Finish Preprocessing')

if __name__ == '__main__':
    NOW = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    logger = setup_logger('./logs/preprocessing_{0}.log'.format(NOW))
    train_df = pd.read_csv(os.path.join(config.DATA_PATH, 'application_train.csv'), nrows=None)
    test_df = pd.read_csv(os.path.join(config.DATA_PATH, 'application_test.csv'), nrows=None)
    all_df = pd.concat([train_df, test_df])
    application_preprocessing(all_df, logger, config.SAVE_PATH)
Example #14
import time
from datetime import datetime

from options import Options
import utils

# using the options from code_live_miccai
opt = Options(isTrain=True)
opt.parse()
opt.save_options()

os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
    str(x) for x in opt.train['gpus'])

# set up logger
logger, logger_results = utils.setup_logger(opt)
opt.print_options(logger)

data_transforms = {
    'train':
    transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation(randint(1, 45)),
        transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]),
    'test':
Example #15
from tqdm import tqdm

import torch
import torch.optim as optim

import data
from model import RNNModel
from utils import process_data, build_unigram_noise, setup_parser, setup_logger
from generic_model import GenModel
from index_gru import IndexGRU
from index_linear import IndexLinear


parser = setup_parser()
args = parser.parse_args()
logger = setup_logger('pt-nce-%s' % args.save)
logger.info(args)

# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        logger.warning('You have a CUDA device, so you should probably run with --cuda')
    else:
        torch.cuda.manual_seed(args.seed)

#################################################################
# Load data
#################################################################
corpus = data.Corpus(
    path=args.data,
Example #16
    def __init__(self, config, model, criterion, weights_init):
        config['trainer']['output_dir'] = os.path.join(
            str(pathlib.Path(os.path.abspath(__name__)).parent),
            config['trainer']['output_dir'])
        config['name'] = config['name'] + '_' + model.name
        self.save_dir = os.path.join(config['trainer']['output_dir'],
                                     config['name'])
        self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')

        if config['trainer']['resume_checkpoint'] == '' and config['trainer'][
                'finetune_checkpoint'] == '':
            shutil.rmtree(self.save_dir, ignore_errors=True)
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

        self.global_step = 0
        self.start_epoch = 1
        self.config = config
        self.model = model
        self.criterion = criterion
        # logger and tensorboard
        self.tensorboard_enable = self.config['trainer']['tensorboard']
        self.epochs = self.config['trainer']['epochs']
        self.display_interval = self.config['trainer']['display_interval']
        if self.tensorboard_enable:
            from torch.utils.tensorboard import SummaryWriter
            self.writer = SummaryWriter(self.save_dir)

        self.logger = setup_logger(os.path.join(self.save_dir, 'train_log'))
        self.logger.info(pformat(self.config))

        # device
        torch.manual_seed(self.config['trainer']['seed'])  # set the random seed for CPU
        if len(self.config['trainer']['gpus']) > 0 and torch.cuda.is_available(
        ):
            self.with_cuda = True
            torch.backends.cudnn.benchmark = True
            self.logger.info('train with gpu {} and pytorch {}'.format(
                self.config['trainer']['gpus'], torch.__version__))
            self.gpus = {
                i: item
                for i, item in enumerate(self.config['trainer']['gpus'])
            }
            self.device = torch.device("cuda:0")
            #self.device = torch.device("cuda:1")
            torch.cuda.manual_seed(
                self.config['trainer']['seed'])  # set the random seed for the current GPU
            torch.cuda.manual_seed_all(
                self.config['trainer']['seed'])  # set the random seed for all GPUs
        else:
            self.with_cuda = False
            self.logger.info('train with cpu and pytorch {}'.format(
                torch.__version__))
            self.device = torch.device("cpu")
        self.logger.info('device {}'.format(self.device))
        self.metrics = {
            'recall': 0,
            'precision': 0,
            'hmean': 0,
            'train_loss': float('inf'),
            'best_model': ''
        }

        self.optimizer = self._initialize('optimizer', torch.optim,
                                          model.parameters())

        if self.config['trainer']['resume_checkpoint'] != '':
            self._laod_checkpoint(self.config['trainer']['resume_checkpoint'],
                                  resume=True)
        elif self.config['trainer']['finetune_checkpoint'] != '':
            self._laod_checkpoint(
                self.config['trainer']['finetune_checkpoint'], resume=False)
        else:
            if weights_init is not None:
                model.apply(weights_init)
        if self.config['lr_scheduler']['type'] != 'PolynomialLR':
            self.scheduler = self._initialize('lr_scheduler',
                                              torch.optim.lr_scheduler,
                                              self.optimizer)

        # single machine, multiple GPUs
        self.model.to(self.device)
        num_gpus = torch.cuda.device_count()
        if num_gpus > 1:
            self.model = nn.DataParallel(self.model)

        if self.tensorboard_enable:
            try:
                # add graph
                dummy_input = torch.zeros(
                    1, self.config['data_loader']['args']['dataset']
                    ['img_channel'], self.config['data_loader']['args']
                    ['dataset']['input_size'], self.config['data_loader']
                    ['args']['dataset']['input_size']).to(self.device)
                self.writer.add_graph(model, dummy_input)
            except:
                import traceback
                # self.logger.error(traceback.format_exc())
                self.logger.warn('add graph to tensorboard failed')
Example #17
  0. You just DO WHAT THE F**K YOU WANT TO.
"""

from utils import setup_logger
import ConfigParser
import dns.tsigkeyring
import dns.query
import dns.update
import dns.zone
import base64
import falcon
import uuid
import os

LOGGER = setup_logger("dnsbin")

CONFIG = ConfigParser.ConfigParser()
CONFIG.read('/etc/dnsbin.ini')

KEYRING = dns.tsigkeyring.from_text({
    CONFIG.get('key', 'name') : CONFIG.get('key', 'secret')
})

def chunks(l, n):
    for i in xrange(0, len(l), n):
        yield l[i:i+n]

class Paste(object):
    def on_post(self, req, resp):
        update = dns.update.Update(
Example #18
    def __init__(self, config, path, train_idx, val_idx):
        # base config
        self.epochs = config.epochs
        self.train_sample_num = config.train_sample_num
        self.val_sample_num = config.val_sample_num
        self.dataset = config.dataset
        self.train_index = train_idx
        self.train_test_num = config.train_test_num
        self.batch_size = config.batch_size
        self.save_model_path = os.path.join(config.save_model_path,
                                            config.dataset, 'LUPVisQ')
        if not os.path.exists(self.save_model_path):
            os.makedirs(self.save_model_path)
        log_name = os.path.join(self.save_model_path, 'train_LUPVisQ.log')
        if os.path.exists(log_name):
            os.remove(log_name)
        self.logger = setup_logger(log_name, 'LUPVisQ')
        self.logger_info(pformat(config))

        # model prepare
        self.repeat_num = config.repeat_num
        self.class_num = config.class_num
        self.channel_num = config.channel_num
        self.tau = config.tau
        self.backbone_type = config.backbone_type
        self.lambda_ = config.lambda_
        self.model_LUPVisQ = models.LUPVisQNet(512, 512, 512, self.class_num,
                                               self.backbone_type,
                                               self.channel_num,
                                               self.tau).cuda()
        # self.model_LUPVisQ.load_state_dict((torch.load('./result/ava_database/LUPVisQ/')))

        # optimizer prepare
        self.lr = config.lr
        self.momentum = config.momentum
        self.weight_decay = config.weight_decay
        self.lr_decay_rate = config.lr_decay_rate
        paras = [
            {
                'params': self.model_LUPVisQ.parameters(),
                'lr': self.lr
            },
        ]
        self.optimizer = torch.optim.SGD(paras,
                                         momentum=self.momentum,
                                         weight_decay=1e-4)

        # dataset prepare
        train_loader = data_loader.LUPVisQDataLoader(
            config.dataset,
            path,
            train_idx,
            config.patch_size,
            config.train_sample_num,
            batch_size=config.batch_size,
            num_workers=config.num_workers,
            istrain='train')
        val_loader = data_loader.LUPVisQDataLoader(
            config.dataset,
            path,
            val_idx,
            config.patch_size,
            config.val_sample_num,
            batch_size=config.batch_size,
            num_workers=config.num_workers,
            istrain='val')
        self.train_data = train_loader.get_data()
        self.val_data = val_loader.get_data()
        self.logger_info('train with device {} and pytorch {}'.format(
            0, torch.__version__))
Example #19
                        help="True or False",
                        default=False)

    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    CKPT_BASE_NAME = args.backbone
    LOGFILE_NAME = CKPT_BASE_NAME + '_logfile'
    experiment_ckpt_path = args.checkpoint_folder_path + args.dataset + '_' + args.split + '_' + CKPT_BASE_NAME
    if not os.path.exists(args.checkpoint_folder_path):
        os.mkdir(args.checkpoint_folder_path)
    if not os.path.exists(experiment_ckpt_path):
        os.mkdir(experiment_ckpt_path)
        os.mkdir(experiment_ckpt_path + '/' + 'backups')

    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    logger = setup_logger('logger', formatter, LOGFILE_NAME)

    dataset_folder_path = os.path.join(args.dataset_folder, args.dataset,
                                       args.split)
    with open(dataset_folder_path + '/train_obj.pkl', 'rb') as f:
        train_obj = pickle.load(f)
    with open(dataset_folder_path + '/test_obj.pkl', 'rb') as f:
        test_obj = pickle.load(f)
    with open(dataset_folder_path + '/meta.pkl', 'rb') as f:
        meta_dict = pickle.load(f)
    with open(dataset_folder_path + '/class_to_idx.pkl', 'rb') as f:
        class_to_idx = pickle.load(f)
    with open(dataset_folder_path + '/idx_to_class.pkl', 'rb') as f:
        idx_to_class = pickle.load(f)

    if args.dataset in ['mnist', 'svhn', 'cifar10']:
Example #20
    opt.add_option('--dhcp-subnetmask', dest='dhcp_subnetmask', default=DHCP_DEFAULT_SUBNETMASK, action='store',
            help='DHCP lease subnet mask')
    opt.add_option('--dhcp-gateway', dest='dhcp_gateway', default=DHCP_DEFAULT_GW, action='store',
            help='DHCP lease gateway')
    opt.add_option('--dhcp-dns', dest='dhcp_dns', default=DHCP_DEFAULT_DNS, action='store',
            help='DHCP lease DNS')
    opt.add_option('--dhcp-bcast', dest='dhcp_bcast', default=DHCP_DEFAULT_BCAST, action='store',
            help='DHCP lease broadcast')
    opt.add_option('--dhcp-fileserver', dest='dhcp_fileserver', default='', action='store',
            help='DHCP lease fileserver IP (option 66)')
    opt.add_option('--dhcp-filename', dest='dhcp_filename', default='', action='store',
            help='DHCP lease filename (option 67)')

    options, args = opt.parse_args(sys.argv[1:])

    main_logger = utils.setup_logger('main_logger', options.logfile, options.debug)
    sip_logger = utils.setup_logger('sip_logger', options.sip_logfile, options.debug, str_format='%(asctime)s %(message)s')    
    
    main_logger.info("Starting application")
    
    main_logger.debug("SIP: Writing SIP messages in %s log file" % options.sip_logfile)
    main_logger.debug("SIP: Authentication password: %s" % options.sip_password)
    main_logger.debug("Logfile: %s" % options.logfile)
    
    if not options.terminal:
        import gui
        import Tkinter as tk

        root = tk.Tk()
        app = gui.MainApplication(root, options, main_logger)
        root.title(sys.argv[0])
Example #21
import os
import validictory

import utils

def run_main(main_func, schema, args):
	if len(args) < 2:
		print "Usage: " + args[0] + " <json config file>"
		print "For possible configs: " + args[0] + " help"
		sys.exit(1)
	if args[1].lower() in ["--h", "-h", "help", "-help", "--help"]:
		#pprint.pprint(schema)
		print json.dumps(schema, indent=2)
		sys.exit(0)

	arguments = {}
	for i in xrange(1, len(args)):
		try:
			config = args[i]
			print >> sys.stderr, "Merging %s into configuration" %(config,)
			arguments.update(json.load(open(config)))
		except Exception, e:
			print e
			sys.exit(1)

	validictory.validate(arguments, schema)
	logging_ini = None
	if "logging_ini" in arguments:
		logging_ini = arguments["logging_ini"]
	utils.setup_logger(logging_ini)
	main_func(arguments)
Example #22
def main():
    """Parse command line options/arguments and execute."""
    try:
        arg_names = ["help", "version", "quick", "strict", "debug", "stop-tag="]
        opts, args = getopt.getopt(sys.argv[1:], "hvqsdct:v", arg_names)
    except getopt.GetoptError:
        usage(2)

    detailed = True
    stop_tag = DEFAULT_STOP_TAG
    debug = False
    strict = False
    color = False

    for option, arg in opts:
        if option in ("-h", "--help"):
            usage(0)
        if option in ("-v", "--version"):
            show_version()
        if option in ("-q", "--quick"):
            detailed = False
        if option in ("-t", "--stop-tag"):
            stop_tag = arg
        if option in ("-s", "--strict"):
            strict = True
        if option in ("-d", "--debug"):
            debug = True
        if option in ("-c", "--color"):
            color = True

    if not args:
        usage(2)

    setup_logger(debug, color)

    # output info for each file
    for filename in args:
        file_start = timeit.default_timer()
        try:
            img_file = open(str(filename), 'rb')
        except IOError:
            logger.error("'%s' is unreadable", filename)
            continue
        logger.info("Opening: %s", filename)

        tag_start = timeit.default_timer()

        # get the tags
        data = process_file(img_file, stop_tag=stop_tag, details=detailed, strict=strict, debug=debug)

        tag_stop = timeit.default_timer()

        if not data:
            logger.warning("No EXIF information found\n")
            continue

        if 'JPEGThumbnail' in data:
            logger.info('File has JPEG thumbnail')
            del data['JPEGThumbnail']
        if 'TIFFThumbnail' in data:
            logger.info('File has TIFF thumbnail')
            del data['TIFFThumbnail']

        tag_keys = list(data.keys())
        tag_keys.sort()

        for i in tag_keys:
            try:
                logger.info('%s (%s): %s', i, FIELD_TYPES[data[i].field_type][2], data[i].printable)
            except:
                logger.error("%s : %s", i, str(data[i]))

        file_stop = timeit.default_timer()

        logger.debug("Tags processed in %s seconds", tag_stop - tag_start)
        logger.debug("File processed in %s seconds", file_stop - file_start)
        print("")
Example #23
def main(out_dir, num_cores):
    '''
    Function to fetch the latest spot history from AWS and store in a
    dataframe saved to a local csv file for every availability zone

    Parameters
    ----------
    out_dir : string
        base file directory to store the spot history dataframes
    num_cores: integer
        number of cores to use
    '''

    # Import packages
    import boto
    import datetime
    import logging
    import os
    import pandas as pd
    from multiprocessing import Process
    from CPAC.AWS import fetch_creds

    # Import local packages
    import utils

    # Init variables
    proc_list = []
    out_csvs = []
    df_list = []
    creds_path = '/home2/dclark/secure-creds/aws-keys/dclark_cmi/dclark_cmi_keys.csv'

    # Set up logger
    now_date = datetime.datetime.now()
    log_month = now_date.strftime('%m-%Y')
    log_path = os.path.join(out_dir, 'spot_history_'+log_month+'.log')

    sh_log = utils.setup_logger('sh_log', log_path, logging.INFO, to_screen=True)

    # Get list of regions
    aws_sak, aws_aki = fetch_creds.return_aws_keys(creds_path)
    reg_conn = boto.connect_ec2(aws_sak, aws_aki)
    regions = reg_conn.get_all_regions()
    reg_conn.close()

    # Init categories to iterate through
    instance_types, product_descriptions = init_categories()

    # Form a list of the combinations of instance types and products
    instance_products = [(inst_type, prod) for inst_type in instance_types \
                                           for prod in product_descriptions]

    # Get total lengths
    reg_len = len(regions)
    ip_len = len(instance_products)

    # For each AWS region
    for reg_idx, region in enumerate(regions):
        # For each instance_type-product combination
        for ip_idx, (instance_type, product) in enumerate(instance_products):
            proc = Process(target=get_df_and_save,
                           args=(None, instance_type, product, region, out_dir))
            proc_list.append(proc)

    # Run in parallel
    utils.run_in_parallel(proc_list, num_cores)

    # Gather files to merge into one dataframe
    sh_log.info('Done fetching and saving histories.\nGathering for merge...')
    for root, dirs, files in os.walk(out_dir):
        if files:
            found_csvs = [os.path.join(root, f) for f in files \
                          if f.endswith('csv')]
            out_csvs.extend(found_csvs)

    # Create data frame list
    for csv in out_csvs:
        df_list.append(pd.DataFrame.from_csv(csv))

    # Merge dataframes
    sh_log.info('Merging dataframe list...')
    big_df = pd.concat(df_list, ignore_index=True)

    # Save to disk
    big_csv = os.path.join(out_dir, 'spot_history_%s.csv' % log_month)
    sh_log.info('Saving data frame to disk as %s' % big_csv)
    big_df.to_csv(big_csv)
Example #24
if __name__ == "__main__":
    # Args: <SESSION> <VOLUME>
    session = sys.argv[1]
    volume = sys.argv[2]

    working_dir = os.path.join(conf.get_opt("working_dir"),
                               session,
                               volume)

    mkdirp(os.path.join(conf.get_opt("log_dir"), session, volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"),
                            session,
                            volume,
                            "changelog.log")

    setup_logger(logger, log_file)

    try:
        def handle_rm_error(func, path, exc_info):
            if exc_info[1].errno == ENOENT:
                return

            raise exc_info[1]

        shutil.rmtree(working_dir, onerror=handle_rm_error)
    except (OSError, IOError) as e:
        logger.error("Failed to delete working directory: %s" % e)
        sys.exit(1)
Example #25
    options.update({'test.save': os.path.join(sprefix, sfolder, 'test.save')})
    options.update({'params.save': os.path.join(sprefix, sfolder, 'params.save')})
    if not os.path.exists(os.path.join(sprefix, sfolder)):
        os.makedirs(os.path.join(sprefix, sfolder))

    # current word included
    train_set, test_set = load_cate_data(options)
    mlp = init_cate_params(options)
    run_cate(mlp, train_set, test_set, options)


if __name__ == "__main__":
    log_folder = '../log'
    main_file = 'main_log'
    correct_file = 'correct_log'
    error_file = 'error_log'
    main_name = 'main'
    corr_name = 'correct'
    error_name = 'error'

    script_path = os.path.dirname(os.path.abspath(__file__))
    log_folder_path = os.path.join(script_path, log_folder)
    if not os.path.exists(log_folder_path):
        os.makedirs(log_folder_path)
    log_file_names = (main_file, correct_file, error_file)
    logger_names = (main_name, corr_name, error_name)
    setup_logger(log_folder_path, log_file_names, logger_names)
    log_main = logging.getLogger(main_name)
    log_main.info('\n*****date: %s*****' % datetime.datetime.now())
    main()
Example #26
    def __init__(self, root, options, main_logger):
        self.root = root
        self.options = options
        self.frame = tk.Frame(root)
        self.main_logger = main_logger
        self.sip_proxy = None
        self.tftp_server = None
        self.http_server = None

        # can enlarge
        self.frame.columnconfigure(0, weight=1)
        # first row: setting, cannot vertical enlarge:
        self.frame.rowconfigure(0, weight=0)
        # second row: registrar, can vertical enlarge:
        self.frame.rowconfigure(1, weight=1)
        
        # Settings control frame
        self.settings_frame = tk.LabelFrame(self.frame, text="Settings", padx=5, pady=5)
        self.settings_frame.grid(row=0, column=0, sticky=tk.N, padx=5, pady=5)
        
        # Registrar frame 
        #self.registrar_frame = tk.Frame(self.frame)
        #self.registrar_frame.rowconfigure(0, weight=1)

        row = 0
        tk.Label(self.settings_frame, text="General settings:", font = "-weight bold").grid(row=row, column=0, sticky=tk.W)
        row = row + 1
        
        self.gui_debug = tk.BooleanVar()
        self.gui_debug.set(self.options.debug)
        tk.Label(self.settings_frame, text="Debug:").grid(row=row, column=0, sticky=tk.W)
        tk.Checkbutton(self.settings_frame, variable=self.gui_debug, command=self.gui_debug_action).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1

        self.gui_sip_ip_address = tk.StringVar()
        self.gui_sip_ip_address.set(self.options.ip_address)
        tk.Label(self.settings_frame, text="IP Address:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_sip_ip_address, width=15).grid(row=row, column=3, sticky=tk.W)
        row = row + 1
       
        tk.Label(self.settings_frame, text="TFTP Server:", font = "-weight bold").grid(row=row, column=0, sticky=tk.W)
        row = row + 1

        self.gui_tftp_port = tk.IntVar()
        self.gui_tftp_port.set(self.options.tftp_port)
        tk.Label(self.settings_frame, text="TFTP Port:").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_tftp_port, width=5).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1
        
        self.gui_tftp_root = tk.StringVar()
        self.gui_tftp_root.set(self.options.tftp_root)
        tk.Label(self.settings_frame, text="TFTP Directory:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_tftp_root, width=15).grid(row=row, column=3, sticky=tk.W)
        row = row + 1


        tk.Label(self.settings_frame, text="HTTP Server:", font = "-weight bold").grid(row=row, column=0, sticky=tk.W)
        row = row + 1
        
        self.gui_http_port = tk.IntVar()
        self.gui_http_port.set(self.options.http_port)
        tk.Label(self.settings_frame, text="HTTP Port:").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_http_port, width=5).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1
        
        self.gui_http_root = tk.StringVar()
        self.gui_http_root.set(self.options.http_root)
        tk.Label(self.settings_frame, text="HTTP Directory:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_http_root, width=15).grid(row=row, column=3, sticky=tk.W)
        row = row + 1

        
        tk.Label(self.settings_frame, text="DHCP Server:", font = "-weight bold").grid(row=row, column=0, sticky=tk.W)
        row = row + 1
        
        self.gui_dhcp_begin = tk.StringVar()
        self.gui_dhcp_begin.set(self.options.dhcp_begin)
        tk.Label(self.settings_frame, text="DHCP Pool start:").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_begin, width=15).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1
        
        self.gui_dhcp_end = tk.StringVar()
        self.gui_dhcp_end.set(self.options.dhcp_end)
        tk.Label(self.settings_frame, text="DHCP Pool end:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_end, width=15).grid(row=row, column=3, sticky=tk.W)
        row = row + 1

        self.gui_dhcp_subnetmask = tk.StringVar()
        self.gui_dhcp_subnetmask.set(self.options.dhcp_subnetmask)
        tk.Label(self.settings_frame, text="DHCP Subnet mask:").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_subnetmask, width=15).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1

        self.gui_dhcp_gateway = tk.StringVar()
        self.gui_dhcp_gateway.set(self.options.dhcp_gateway)
        tk.Label(self.settings_frame, text="DHCP Gateway:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_gateway, width=15).grid(row=row, column=3, sticky=tk.W)
        row = row + 1

        self.gui_dhcp_bcast = tk.StringVar()
        self.gui_dhcp_bcast.set(self.options.dhcp_bcast)
        tk.Label(self.settings_frame, text="DHCP Broadcast:").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_bcast, width=15).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1

        self.gui_dhcp_dns = tk.StringVar()
        self.gui_dhcp_dns.set(self.options.dhcp_dns)
        tk.Label(self.settings_frame, text="DHCP DNS:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_dns, width=15).grid(row=row, column=3, sticky=tk.W)
        row = row + 1

        self.gui_dhcp_fileserver = tk.StringVar()
        self.gui_dhcp_fileserver.set(self.options.dhcp_fileserver)
        tk.Label(self.settings_frame, text="DHCP Fileserver (opt. 66):").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_fileserver, width=25).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1

        self.gui_dhcp_filename = tk.StringVar()
        self.gui_dhcp_filename.set(self.options.dhcp_filename)
        tk.Label(self.settings_frame, text="DHCP Filename (opt. 67):").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_dhcp_filename, width=25).grid(row=row, column=3, sticky=tk.W)
        row = row + 1

        tk.Label(self.settings_frame, text="SIP Plug&Play:", font = "-weight bold").grid(row=row, column=0, sticky=tk.W)
        row = row + 1
        
        self.gui_pnp_uri = tk.StringVar()
        self.gui_pnp_uri.set(self.options.pnp_uri)
        tk.Label(self.settings_frame, text="PnP URI:").grid(row=row, column=0, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_pnp_uri, width=60).grid(row=row, column=1, columnspan=3, sticky=tk.W)
        row = row + 1
        
        tk.Label(self.settings_frame, text="SIP Proxy:", font = "-weight bold").grid(row=row, column=0, sticky=tk.W)
        row = row + 1
        
        self.gui_sip_redirect = tk.BooleanVar()
        self.gui_sip_redirect.set(self.options.sip_redirect)
        tk.Label(self.settings_frame, text="SIP Redirect server:").grid(row=row, column=0, sticky=tk.W)
        tk.Checkbutton(self.settings_frame, variable=self.gui_sip_redirect, command=self.gui_sip_redirect_action).grid(row=row, column=1, sticky=tk.W)
        #row = row + 1
        
        self.gui_sip_port = tk.IntVar()
        self.gui_sip_port.set(self.options.sip_port)
        tk.Label(self.settings_frame, text="SIP Port:").grid(row=row, column=2, sticky=tk.W)
        tk.Entry(self.settings_frame, textvariable=self.gui_sip_port, width=5).grid(row=row, column=3, sticky=tk.W)
        row = row + 1
 
        self.gui_sip_password = tk.StringVar()
        self.gui_sip_password.set(self.options.sip_password)
        tk.Label(self.settings_frame, text="Password:"******"Start SIP Proxy", command=self.start_sip_proxy)
        self.sip_control_button.grid(row=row, column=0, sticky=tk.N)
        
        self.tftp_control_button = tk.Button(self.settings_frame, text="Start TFTP Server", command=self.start_tftp_server)
        self.tftp_control_button.grid(row=row, column=1, sticky=tk.N)
        
        self.http_control_button = tk.Button(self.settings_frame, text="Start HTTP Server", command=self.start_http_server)
        self.http_control_button.grid(row=row, column=2, sticky=tk.N)
        
        self.dhcp_control_button = tk.Button(self.settings_frame, text="Start DHCP Server", command=self.start_dhcp_server)
        self.dhcp_control_button.grid(row=row, column=3, sticky=tk.N)
        
        self.pnp_control_button = tk.Button(self.settings_frame, text="Start PnP Server", command=self.start_pnp_server)
        self.pnp_control_button.grid(row=row, column=4, sticky=tk.N)
        
        #self.registrar_button = tk.Button(self.settings_frame, text="Reload registered", command=self.load_registrar)
        #self.registrar_button.grid(row=row, column=4, sticky=tk.N)
        #row = row + 1
        
        #self.registrar_frame.grid(row=1, column=0, sticky=tk.NS)
        
        #self.registrar_text = ScrolledText(self.registrar_frame)
        #self.registrar_text.grid(row=0, column=0, sticky=tk.NS)
        #self.registrar_text.config(state='disabled') 
    
        self.sip_queue = Queue.Queue()
        self.sip_trace_logger = utils.setup_logger('sip_widget_logger', log_file=None, debug=True, str_format='%(asctime)s %(message)s', handler=SipTraceQueueLogger(queue=self.sip_queue))
    
        self.log_queue = Queue.Queue()
        utils.setup_logger('main_logger', options.logfile, self.options.debug, handler=MessagesQueueLogger(queue=self.log_queue))
Example #27
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    ood = ["OOD.exclude_back", args.exclude_back, "OOD.ood", args.ood]

    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(ood)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    logger = setup_logger(distributed_rank=0)  # TODO
    logger.info("Loaded configuration file {}".format(args.cfg))
    logger.info("Running with config:\n{}".format(cfg))

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(cfg.DIR,
                                             'encoder_' + cfg.VAL.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(cfg.DIR,
                                             'decoder_' + cfg.VAL.checkpoint)
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    if not os.path.isdir(os.path.join(cfg.DIR, "result")):
        os.makedirs(os.path.join(cfg.DIR, "result"))

    main(cfg, args.gpu)
Example #28
import argparse
import os
import sys

def setup_args():

    options = argparse.ArgumentParser()
    
    options.add_argument('--metafile', action="store", default = "splits/train_total.csv")
    options.add_argument('--featfile', action="store")
    options.add_argument('--evalfeatfile', action="store")
    options.add_argument('--save-dir', action="store", dest="save_dir", default="results/ot/")
    options.add_argument('--eval-save-dir', action="store", dest="eval_save_dir", default=None)

    options.add_argument('--label1', action="store", default=0, type=float)
    options.add_argument('--label2', action="store", default=1, type=float)
    options.add_argument('--reg', action="store", default=1, type=float)
    options.add_argument('--nbins', action="store", default=2, type=int)
    options.add_argument('--split-features', action="store_true", default=False)

    return options.parse_args()

if __name__ == "__main__":
    args = setup_args()
    os.makedirs(args.save_dir, exist_ok=True)
    logger = setup_logger(name='ot_log', save_dir=args.save_dir)
    logger.info(" ".join(sys.argv))
    logger.info(args)
    get_ot_matrix(args, logger)
    eval_ot_matrix(args, logger)
Example #29
        if test_summary['test_loss'] < best_loss:
            best_loss = test_summary['test_loss']
            current_state['best_loss'] = best_loss
            save_checkpoint(current_state=current_state,
                            filename=os.path.join(args.save_dir,
                                                  "models/best.pth"))

        logger.info("Best loss: %s" % best_loss)

        if epoch % args.save_freq == 0:
            save_checkpoint(current_state=current_state,
                            filename=os.path.join(
                                args.save_dir, "models/epoch_%s.pth" % epoch))

        save_checkpoint(current_state=current_state,
                        filename=os.path.join(args.save_dir,
                                              "models/last.pth"))

        if torch.cuda.is_available():
            net.cuda()


if __name__ == "__main__":
    args = setup_args()
    os.makedirs(args.save_dir, exist_ok=True)
    logger = setup_logger(name='training_log', save_dir=args.save_dir)
    logger.info(" ".join(sys.argv))
    logger.info(args)
    run_training(args, logger)
Example #30
def main(instructions=None, params=None, do_one_iteration=False):
    if not instructions:
        return

    if not params:
        params = __import__('params')

    data_json = "data.json"
    actions_fname = os.path.abspath(__file__).rsplit("/",1)[0]+"/actions.txt"

    u.copy_jecs()
    logger_name = u.setup_logger()
    logger = logging.getLogger(logger_name)


    time_stats = []
    if os.path.isfile(data_json):
        with open(data_json, "r") as fhin:
            data = json.load(fhin)
            if "time_stats" in data: time_stats = data["time_stats"]

    all_samples = []
    for i in range(5000):

        if u.proxy_hours_left() < 60 and not params.FORSAKE_HEAVENLY_PROXY: u.proxy_renew()

        data = { "samples": [], "last_updated": None, "time_stats": time_stats }

        # read instructions file. if new sample found, add it to list
        # for existing samples, try to update params (xsec, kfact, etc.)
        for samp in u.read_samples(instructions):
            samp["params"] = params
            if samp not in all_samples:
                s = Samples.Sample(**samp) 
                all_samples.append(s)
            else:
                all_samples[all_samples.index(samp)].update_params(samp)


        n_done = 0
        n_samples = len(all_samples)
        for isample, s in enumerate(all_samples):

            try:
                stat = s.get_status()
                typ = s.get_type()

                # grab actions from a text file and act on them, consuming (removing) them if successful
                for dataset_name, action in u.get_actions(actions_fname=actions_fname,dataset_name=s["dataset"]):
                    if s.handle_action(action):
                        u.consume_actions(dataset_name=s["dataset"],action=action, actions_fname=actions_fname)

                if not s.pass_tsa_prechecks(): continue


                if typ == "CMS3":

                    if stat == "new":
                        s.crab_submit()
                    elif stat == "crab":
                        s.crab_parse_status()
                        if s.is_crab_done():
                            s.make_miniaod_map()
                            s.make_merging_chunks()
                            s.submit_merge_jobs()
                    elif stat == "postprocessing":
                        if s.is_merging_done():
                            if s.check_output():
                                s.make_metadata()
                                s.copy_files()
                        else:
                            s.submit_merge_jobs()
                    elif stat == "done":
                        s.do_done_stuff()
                        n_done += 1

                elif typ == "BABY":
                    
                    if stat == "new":
                        s.set_baby_inputs()
                        s.submit_baby_jobs()

                    elif stat == "condor" or stat == "postprocessing":
                        if params.open_datasets:
                            s.check_new_merged_for_babies()

                        if not params.open_datasets and s.is_babymaking_done():
                            s.set_status("done")
                        else:
                            # s.sweep_babies()
                            s.sweep_babies_parallel()
                            s.submit_baby_jobs()

                    elif stat == "done":
                        if params.open_datasets:
                            s.check_new_merged_for_babies()
                        else:
                            s.do_done_stuff()
                            n_done += 1


                s.save()
                data["samples"].append( s.get_slimmed_dict() )

            except Exception, err:
                logger.info( "send an (angry?) email to Nick with the Traceback below!!")
                logger.info( traceback.format_exc() )

        breakdown_crab = u.sum_dicts([samp["crab"]["breakdown"] for samp in data["samples"] if "crab" in samp and "breakdown" in samp["crab"]])
        # breakdown_baby = u.sum_dicts([{"baby_"+key:samp["baby"].get(key,0) for key in ["running", "sweepRooted"]} for samp in data["samples"] if samp["type"] == "BABY"])
        breakdown_baby = u.sum_dicts([{"running_babies":samp["baby"]["running"], "sweepRooted_babies":samp["baby"]["sweepRooted"]} for samp in data["samples"] if samp["type"] == "BABY"])
        tot_breakdown = u.sum_dicts([breakdown_crab, breakdown_baby])
        data["last_updated"] = u.get_timestamp()
        data["time_stats"].append( (u.get_timestamp(), tot_breakdown) )
        data["log"] = u.get_last_n_lines(fname=params.log_file, N=100)
        with open(data_json, "w") as fhout:
            data["samples"] = sorted(data["samples"], key=lambda x: x.get("status","done")=="done")
            json.dump(data, fhout, sort_keys = True, indent = 4)
        u.copy_json(params)

        if params.exit_when_done and (n_done == n_samples):
            print ">>> All %i samples are done. Exiting." % n_samples
            break

        if not do_one_iteration:
            sleep_time = 60 if i < 2 else 2*600
            logger.debug("sleeping for %i seconds..." % sleep_time)
            u.smart_sleep(sleep_time, files_to_watch=[actions_fname, instructions])
        else:
            break
Example #31
def main():
    import pydevd_pycharm
    pydevd_pycharm.settrace('172.26.3.54',
                            port=12345,
                            stdoutToServer=True,
                            stderrToServer=True)
    parser = argparse.ArgumentParser(description="DFDGAN Showing G pic")
    parser.add_argument("--config_file",
                        default="./configs/show_pic.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    datasets_dir = ''
    for dataset_name in cfg.DATASETS.NAMES:
        if datasets_dir != '':
            datasets_dir += '-'
        datasets_dir += dataset_name
    output_dir = os.path.join(output_dir, datasets_dir)
    time_string = 'show_pic[{}]'.format(
        time.strftime('%Y-%m-%d-%X', time.localtime(time.time())))
    output_dir = os.path.join(output_dir, time_string)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    device = cfg.TEST.DEVICE
    if device == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.TEST.DEVICE_ID
    cudnn.benchmark = True
    logger = setup_logger("DFDGAN", output_dir, 0)
    logger.info("Running with config:\n{}".format(cfg))

    data_loader, num_classes = make_dataloaders(cfg)
    E = Encoder(num_classes, cfg.E.LAST_STRIDE, cfg.E.PRETRAIN_PATH,
                cfg.E.NECK, cfg.TEST.NECK_FEAT, cfg.E.NAME,
                cfg.E.PRETRAIN_CHOICE).to(device)
    Ed = Encoder(num_classes, cfg.ED.LAST_STRIDE, cfg.ED.PRETRAIN_PATH,
                 cfg.ED.NECK, cfg.TEST.NECK_FEAT, cfg.ED.NAME,
                 cfg.ED.PRETRAIN_CHOICE).to(device)
    G = DFDGenerator(cfg.G.PRETRAIN_PATH,
                     cfg.G.PRETRAIN_CHOICE,
                     noise_size=cfg.TRAIN.NOISE_SIZE).to(device)
    for _, batch in enumerate(data_loader):
        img_x1, img_x2, img_y1, img_y2, target_pid, target_setid = batch
        img_x1, img_x2, img_y1, img_y2, target_pid, target_setid = img_x1.to(
            device), img_x2.to(device), img_y1.to(device), img_y2.to(
                device), target_pid.to(device), target_setid.to(device)
        g_img = G(E(img_x1)[0], Ed(img_y1)[0])
        img_x1_PIL = transforms.ToPILImage()(img_x1[0].cpu()).convert('RGB')
        img_x1_PIL.save(os.path.join(output_dir, 'img_x1.jpg'))
        img_y1_PIL = transforms.ToPILImage()(img_y1[0].cpu()).convert('RGB')
        img_y1_PIL.save(os.path.join(output_dir, 'img_y1.jpg'))
        g_img_PIL = transforms.ToPILImage()(g_img[0].cpu()).convert('RGB')
        g_img_PIL.save(os.path.join(output_dir, 'g_img.jpg'))
        break
Example #32
def main():
    global best_score, logger, logger_results, slide_weights
    print('inside train_inception.py ')
    opt = Options(isTrain=True)
    opt.parse()
    opt.save_options()

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.train['gpus'])

    # set up logger
    logger, logger_results = utils.setup_logger(opt)
    opt.print_options(logger)

    # ---------- Create model ---------- #
    model = Inception_v3(opt.model['out_c'])
    model = model.cuda()

    # logger.info(model)
    # ---------- End create model ---------- #

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    # params are taken from the nas scoring paper
    lr = 0.00005
    logger.info('lr :{}\n'.format(lr))
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                      mode='min',
                                                      factor=0.2,
                                                      patience=2,
                                                      verbose=False,
                                                      threshold=0.0001,
                                                      threshold_mode='rel',
                                                      cooldown=1,
                                                      min_lr=1e-7,
                                                      eps=1e-08)

    # ---------- Data loading ---------- #
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize((299, 299)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomRotation(randint(1, 45)),
            transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]),
        'test':
        transforms.Compose([
            transforms.Resize((299, 299)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    }

    fold_num = opt.exp_num.split('_')[-1]
    logger.info('Fold number: {:s}'.format(fold_num))

    if opt.exp in ['fib']:
        #base_path = '/dresden/users/aj611/experiments/biomed/he_images/'
        base_path = '/dresden/users/aj611/experiments/biomed/he_images_check/'
    else:
        #base_path = '/dresden/users/aj611/experiments/biomed/he_images_3x/'
        base_path = '/dresden/users/aj611/experiments/biomed/he_images_3x_check/'
    #data_dir = opt.train['data_dir'] + opt.exp + '/' + 'fold_{}/'.format(fold_num)
    data_dir = base_path + opt.exp + '/' + 'fold_{}/'.format(fold_num)
    print('data_dir :', data_dir)
    #data_dir = '/dresden/users/aj611/experiments/biomed/he_images_bkp/fibrosis/'

    batch_size = 8
    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
        for x in ['train', 'test']
    }
    #dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size = batch_size, drop_last=True, shuffle = True, num_workers = 8) for x in ['train', 'test']}
    dataloaders = {}
    x = 'train'
    dataloaders[x] = torch.utils.data.DataLoader(image_datasets[x],
                                                 batch_size=batch_size,
                                                 drop_last=True,
                                                 shuffle=True,
                                                 num_workers=8)
    x = 'test'
    dataloaders[x] = torch.utils.data.DataLoader(image_datasets[x],
                                                 batch_size=1,
                                                 drop_last=True,
                                                 shuffle=True,
                                                 num_workers=8)
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}

    logger.info('data_dir :{}\n'.format(data_dir))
    class_names = image_datasets['train'].classes

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # ---------- End Data loading ---------- #

    # ----- optionally load from a checkpoint ----- #
    # if opt.train['checkpoint']:
    #     model_state_dict, optimizer_state_dict = load_checkpoint(opt.train['checkpoint'])
    #     model.load_state_dict(model_state_dict)
    #     optimizer.load_state_dict(optimizer_state_dict)
    # ----- End checkpoint loading ----- #

    # ----- Start training ---- #
    best_score = 0
    for epoch in range(opt.train['epochs']):
        # train and validate for one epoch
        test_auc = train_model(model, opt, dataset_sizes, dataloaders,
                               image_datasets, criterion, optimizer,
                               exp_lr_scheduler, epoch)
        print('epoch {}'.format(epoch))
        # remember best accuracy and save checkpoint
        is_best = test_auc > best_score
        best_score = max(test_auc, best_score)
        cp_flag = False
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_score': best_score,
                'optimizer': optimizer.state_dict(),
            }, is_best, opt.train['save_dir'], cp_flag, epoch + 1)

        # save training results
        #logger_results.info('{:<6d}| {:<12.4f}{:<12.4f}||  {:<12.4f}{:<12.4f}{:<12.4f}'
        #                    .format(epoch, train_loss, train_acc,
        #                            test_loss, test_acc, test_auc))

    for i in list(logger.handlers):
        logger.removeHandler(i)
        i.flush()
        i.close()
    for i in list(logger_results.handlers):
        logger_results.removeHandler(i)
        i.flush()
        i.close()
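
Both this example and Exemple #37 below call a save_checkpoint helper that is not shown. A minimal sketch follows, assuming a latest/best file-naming convention that the real utility may not share.

import os
import shutil

import torch


def save_checkpoint(state, is_best, save_dir, cp_flag, epoch):
    # Hypothetical helper matching save_checkpoint(state, is_best, save_dir, cp_flag, epoch).
    os.makedirs(save_dir, exist_ok=True)
    latest_path = os.path.join(save_dir, 'checkpoint_latest.pth.tar')
    torch.save(state, latest_path)
    if cp_flag:
        # Keep a per-epoch copy when explicitly requested.
        shutil.copyfile(latest_path,
                        os.path.join(save_dir, 'checkpoint_{:d}.pth.tar'.format(epoch)))
    if is_best:
        # Track the checkpoint with the best validation score so far.
        shutil.copyfile(latest_path, os.path.join(save_dir, 'checkpoint_best.pth.tar'))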
Exemple #33
0
from __future__ import print_function
from __future__ import division

import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np

from utils import maybe_cuda, setup_logger, unsort
from times_profiler import profiler

logger = setup_logger(__name__, 'train.log')
profilerLogger = setup_logger("profilerLogger", 'profiler.log', True)


def zero_state(module, batch_size):
    # * 2 is for the two directions
    return Variable(maybe_cuda(torch.zeros(module.num_layers * 2, batch_size, module.hidden))), \
           Variable(maybe_cuda(torch.zeros(module.num_layers * 2, batch_size, module.hidden)))


class SentenceEncodingCNN(nn.Module):
    def __init__(self, D=300, Ci=1, Co=32, Ks=[2, 3]):
        super(SentenceEncodingCNN, self).__init__()
        self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])

    def forward(self, x):
        x = x.unsqueeze(1)  # (N, Ci, W, D)
        x = [F.relu(conv(x)).squeeze(3)
Exemple #34
0
from utils import setup_logger
from settings import run_folder

### SET all LOGGER_DISABLED to True to disable logging
### WARNING: the mcts log file gets big quite quickly

LOGGER_DISABLED = {
    'main': False,
    'memory': True,
    'tourney': False,
    'mcts': True,
    'model': True,
}


logger_mcts = setup_logger('logger_mcts', run_folder + 'logs/logger_mcts.log')
logger_mcts.disabled = LOGGER_DISABLED['mcts']

logger_main = setup_logger('logger_main', run_folder + 'logs/logger_main.log')
logger_main.disabled = LOGGER_DISABLED['main']

logger_tourney = setup_logger('logger_tourney', run_folder + 'logs/logger_tourney.log')
logger_tourney.disabled = LOGGER_DISABLED['tourney']

logger_memory = setup_logger('logger_memory', run_folder + 'logs/logger_memory.log')
logger_memory.disabled = LOGGER_DISABLED['memory']

logger_model = setup_logger('logger_model', run_folder + 'logs/logger_model.log')
logger_model.disabled = LOGGER_DISABLED['model']
 
Exemple #35
0
def main(args):
    this_dir = osp.join(osp.dirname(__file__), '.')
    save_dir = osp.join(this_dir, 'checkpoints')
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    command = 'python ' + ' '.join(sys.argv)
    logger = utl.setup_logger(osp.join(this_dir, 'log.txt'), command=command)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    utl.set_seed(int(args.seed))

    model = build_model(args)
    if osp.isfile(args.checkpoint):
        checkpoint = torch.load(args.checkpoint,
                                map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        model.apply(utl.weights_init)
    if args.distributed:
        model = nn.DataParallel(model)
    model = model.to(device)

    criterion = utl.MultiCrossEntropyLoss(ignore_index=21).to(device)
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
    if osp.isfile(args.checkpoint):
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr
        args.start_epoch += checkpoint['epoch']
    softmax = nn.Softmax(dim=1).to(device)

    for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
        if epoch == 21:
            args.lr = args.lr * 0.1
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr

        data_loaders = {
            phase: utl.build_data_loader(args, phase)
            for phase in args.phases
        }

        enc_losses = {phase: 0.0 for phase in args.phases}
        enc_score_metrics = []
        enc_target_metrics = []
        enc_mAP = 0.0
        dec_losses = {phase: 0.0 for phase in args.phases}
        dec_score_metrics = []
        dec_target_metrics = []
        dec_mAP = 0.0

        start = time.time()
        for phase in args.phases:
            training = phase == 'train'
            if training:
                model.train(True)
            elif not training and args.debug:
                model.train(False)
            else:
                continue

            with torch.set_grad_enabled(training):
                for batch_idx, (camera_inputs, motion_inputs, enc_target, dec_target) \
                        in enumerate(data_loaders[phase], start=1):
                    batch_size = camera_inputs.shape[0]
                    camera_inputs = camera_inputs.to(device)
                    motion_inputs = motion_inputs.to(device)
                    enc_target = enc_target.to(device).view(
                        -1, args.num_classes)
                    dec_target = dec_target.to(device).view(
                        -1, args.num_classes)

                    enc_score, dec_score = model(camera_inputs, motion_inputs)
                    enc_loss = criterion(enc_score, enc_target)
                    dec_loss = criterion(dec_score, dec_target)
                    enc_losses[phase] += enc_loss.item() * batch_size
                    dec_losses[phase] += dec_loss.item() * batch_size
                    if args.verbose:
                        print(
                            'Epoch: {:2} | iteration: {:3} | enc_loss: {:.5f} dec_loss: {:.5f}'
                            .format(epoch, batch_idx, enc_loss.item(),
                                    dec_loss.item()))

                    if training:
                        optimizer.zero_grad()
                        loss = enc_loss + dec_loss
                        loss.backward()
                        optimizer.step()
                    else:
                        # Prepare metrics for encoder
                        enc_score = softmax(enc_score).cpu().numpy()
                        enc_target = enc_target.cpu().numpy()
                        enc_score_metrics.extend(enc_score)
                        enc_target_metrics.extend(enc_target)
                        # Prepare metrics for decoder
                        dec_score = softmax(dec_score).cpu().numpy()
                        dec_target = dec_target.cpu().numpy()
                        dec_score_metrics.extend(dec_score)
                        dec_target_metrics.extend(dec_target)
        end = time.time()

        if args.debug:
            result_file = 'inputs-{}-epoch-{}.json'.format(args.inputs, epoch)
            # Compute result for encoder
            enc_mAP = utl.compute_result_multilabel(
                args.class_index,
                enc_score_metrics,
                enc_target_metrics,
                save_dir,
                result_file,
                ignore_class=[0, 21],
                save=True,
            )
            # Compute result for decoder
            dec_mAP = utl.compute_result_multilabel(
                args.class_index,
                dec_score_metrics,
                dec_target_metrics,
                save_dir,
                result_file,
                ignore_class=[0, 21],
                save=False,
            )

        # Output result
        logger.output(epoch,
                      enc_losses,
                      dec_losses,
                      len(data_loaders['train'].dataset),
                      len(data_loaders['test'].dataset),
                      enc_mAP,
                      dec_mAP,
                      end - start,
                      debug=args.debug)

        # Save model
        checkpoint_file = 'inputs-{}-epoch-{}.pth'.format(args.inputs, epoch)
        torch.save(
            {
                'epoch':
                epoch,
                'model_state_dict':
                model.module.state_dict()
                if args.distributed else model.state_dict(),
                'optimizer_state_dict':
                optimizer.state_dict(),
            }, osp.join(save_dir, checkpoint_file))
Exemple #36
0
import numpy as np
import pandas as pd
import datetime
import os, sys, gc, time, warnings, pickle, psutil, random
from tqdm import tqdm
from multiprocessing import Pool
from utils import setup_logger
warnings.filterwarnings('ignore')

########################### logger
#################################################################################
NOW = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
logger = setup_logger(f'./logs/train_{NOW}.log')

########################### Helpers
#################################################################################


def seed_everything(seed=0):
    random.seed(seed)
    np.random.seed(seed)


## Multiprocess Runs
def df_parallelize_run(func, t_split):
    num_cores = np.min([N_CORES, len(t_split)])
    pool = Pool(num_cores)
    df = pd.concat(pool.map(func, t_split), axis=1)
    pool.close()
    pool.join()
    return df
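
N_CORES is referenced by df_parallelize_run but defined outside this snippet. The usage sketch below is only illustrative: the N_CORES definition and the per-column transform are assumptions, not the original feature-engineering code.

import numpy as np
import pandas as pd
import psutil

# Assumption: the original script derives N_CORES from the machine, e.g. via psutil.
N_CORES = psutil.cpu_count(logical=False) or 1

df = pd.DataFrame({'sales': np.arange(10, dtype=float),
                   'price': np.linspace(1.0, 2.0, 10)})


def lag_column(col):
    # Per-column transform executed in a worker process; returns a one-column frame.
    return df[[col]].shift(1).add_suffix('_lag1')


if __name__ == '__main__':
    lagged = df_parallelize_run(lag_column, ['sales', 'price'])
    print(lagged.head())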
Exemple #37
0
def main():
    global best_score, logger, logger_results, slide_weights
    opt = Options(isTrain=True)
    opt.parse()
    opt.save_options()

    # tb_writer = SummaryWriter('{:s}/tb_logs'.format(opt.train['save_dir']))

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(x) for x in opt.train['gpus'])

    # set up logger
    logger, logger_results = utils.setup_logger(opt)
    opt.print_options(logger)

    if opt.train['random_seed'] >= 0:
        # logger.info("=> Using random seed {:d}".format(opt.random_seed))
        torch.manual_seed(opt.train['random_seed'])
        torch.cuda.manual_seed(opt.train['random_seed'])
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(opt.train['random_seed'])
    else:
        torch.backends.cudnn.benchmark = True

    # ---------- Create model ---------- #
    #print('in train file', opt.model['use_resnet'])
    if opt.model['use_resnet'] == 1:
        #print('opt model use_resnet ', type(opt.model['use_resnet']))
        #print('choosing baseline2')
        model = BaselineNet2(opt.model['out_c'], opt.model['resnet_layers'], opt.model['train_res4'])
        logger.info('choosing BaselineNet2(resnet based) model')
    else:
        model = BaselineNet(opt.model['in_c'], opt.model['out_c'])
        logger.info('choosing BaselineNet model')
        if opt.model['pre_train_sup'] == 1:
            print("=> loading pre-trained model from path", opt.model['sup_model_path'])
            model.set_fea_extractor(opt.model['sup_model_path'])

    model = model.cuda()
    # logger.info(model)
    # ---------- End create model ---------- #

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(), opt.train['lr'], weight_decay=opt.train['weight_decay'])

    # ---------- Data loading ---------- #
    data_transform = transforms.Compose([
                    transforms.Resize((224, 224)),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485],
                                         std=[0.229])
                ])

    fold_num = opt.exp_num.split('_')[-1]
    logger.info('Fold number: {:s}'.format(fold_num))
    if opt.model['use_resnet'] == 1:
        train_set = LiverDataset('{:s}/train{:s}.h5'.format(opt.train['data_dir'], fold_num), data_transform, opt)
        test_set = LiverDataset('{:s}/test{:s}.h5'.format(opt.train['data_dir'], fold_num), data_transform, opt)
    else:
        train_set = LiverDataset('{:s}/train{:s}.h5'.format(opt.train['data_dir'], fold_num), data_transform)
        test_set = LiverDataset('{:s}/test{:s}.h5'.format(opt.train['data_dir'], fold_num), data_transform)
    # loading from the pre-trained model
    #print("=> loading pre-trained model from path", opt.model['sup_model_path'])
    #model.set_fea_extractor(opt.model['sup_model_path'])
    # ---------- End Data loading ---------- #

    # ----- optionally load from a checkpoint ----- #
    # if opt.train['checkpoint']:
    #     model_state_dict, optimizer_state_dict = load_checkpoint(opt.train['checkpoint'])
    #     model.load_state_dict(model_state_dict)
    #     optimizer.load_state_dict(optimizer_state_dict)
    # ----- End checkpoint loading ----- #

    # ----- Start training ---- #
    best_score = 0
    for epoch in range(opt.train['epochs']):
        # train and validate for one epoch
        train_loss, train_acc = train(opt, train_set, model, criterion, optimizer, epoch)
        test_loss, test_acc, test_auc = test(opt, test_set, model, criterion, epoch)

        # remember best accuracy and save checkpoint
        is_best = test_auc > best_score
        best_score = max(test_auc, best_score)
        cp_flag = False
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_score': best_score,
            'optimizer': optimizer.state_dict(),
        }, is_best, opt.train['save_dir'], cp_flag, epoch+1)

        # save training results
        logger_results.info('{:<6d}| {:<12.4f}{:<12.4f}||  {:<12.4f}{:<12.4f}{:<12.4f}'
                            .format(epoch, train_loss, train_acc,
                                    test_loss, test_acc, test_auc))

    for i in list(logger.handlers):
        logger.removeHandler(i)
        i.flush()
        i.close()
    for i in list(logger_results.handlers):
        logger_results.removeHandler(i)
        i.flush()
        i.close()
Exemple #38
0
                                    help='A csv containing phenotypic info for the release. Used to create participants.tsv') 
    # For copying inherited files.
    parser.add_argument('-i', '--inherited', required=False,
                                    help='A path to a directory containing task metadata in JSON format, as well as other files to be included at the top level of the BIDS directory (such as the dataset description).  Some of this data will be inherited by all scans.')
    
    args = parser.parse_args()
    bids_warehouse = os.path.abspath(args.bids_warehouse)
    log_dir = os.path.abspath(args.log_dir)

    # Log errors
    now = datetime.now()
    bids_logs = os.path.join(log_dir, 'bids_logs')
    log_path = os.path.join(bids_logs, str(now.year), str(now.month), str(now.day))
    if not os.path.exists(log_path):
        mkdir_p(log_path)
    bids_nifti_sanity_log = setup_logger('bids_nifti_sanity',
                                         os.path.join(log_path, 'bids_nifti_sanity.log'),
                                         logging.INFO)

    # Read anonymization key in as dictionary and define output directory.
    anonmap = {}
    if args.anon_ursi:
        with open(os.path.abspath(args.anon_ursi), 'rU') as map_file:
            csvreader = csv.reader(map_file, delimiter=',')
            for row in csvreader:
                anonmap[row[1]] = row[0]
        output_dir = os.path.join(bids_warehouse, 'Anonymized')
    else:
        output_dir = os.path.join(bids_warehouse, 'Non-anonymized')
    mkdir_p(output_dir)

    if args.inherited:
        bids_inherit_log = setup_logger('bids_inherit',
                                        os.path.join(log_path, 'bids_inherit_jsons.log'),
                                        logging.INFO)
Exemple #39
0
from api_observer import ApiObserver
from bot import YobitBot
from constants import BOT_TOKEN
from models import Chat
from utils import setup_logger

if __name__ == '__main__':
  if not Chat.table_exists():
    Chat.create_table()

  bot = YobitBot(BOT_TOKEN)
  bot.start()

  setup_logger()
  observer = ApiObserver(bot)
  observer.observe()
Exemple #40
0
    def handle(self):
        data = self.request[0]
        self.data = data.split("\r\n")
        self.socket = self.request[1]
        request_uri = self.data[0]
        if rx_request_uri.search(request_uri) or rx_code.search(request_uri):
            self.server.sip_logger.debug("Received from %s:%d (%d bytes):\n\n%s" %  (self.client_address[0], self.client_address[1], len(data), data))
            self.processRequest()
        else:
            if len(data) > 4:
                self.server.sip_logger.debug("Received from %s:%d (%d bytes):\n\n" %  (self.client_address[0], self.client_address[1], len(data)))
                mess = hexdump(data,' ',16)
                self.server.sip_logger.debug('PnP Hex data:\n' + '\n'.join(mess))

if __name__ == '__main__':
    import utils

    class Options:
        ip = "127.0.0.1"
        pnp_uri = "http://test.com"
    
    options = Options()
    
    HOST = "224.0.1.75"
    PORT = 5060
    main_logger = utils.setup_logger('main_logger', None, True)
    sip_logger = utils.setup_logger('sip_logger', None, True)

    pnp_server = SipTracedMcastUDPServer((HOST, PORT), UDPHandler, main_logger, sip_logger, options)
    pnp_server.serve_forever()
Exemple #41
0
"""
import time
import sched
import pandas
import logging
import requests
from io import StringIO

import utils
from database import upsert_bpa

BPA_SOURCE = 'https://og-production-open-data-bostonma-892364687672.s3.amazonaws.com/resources/f123e65d-dc0e-4c83-9348-ed46fec498c0/tmpke4utrxy.csv?Signature=UAjyK0cDg%2BFiT22SGIeHlr12eQ8%3D&Expires=1575837619&AWSAccessKeyId=AKIAJJIENTAPKHZMIPXQ'
MAX_DOWNLOAD_ATTEMPT = 5
DOWNLOAD_PERIOD = 10  # second
logger = logging.Logger(__name__)
utils.setup_logger(logger, 'data.log')


def download_bpa(url=BPA_SOURCE, retries=MAX_DOWNLOAD_ATTEMPT):
    """Returns BPA text from `BPA_SOURCE` that includes power loads and resources
    Returns None if network failed
    """
    print('hi')
    text = None
    for i in range(retries):
        try:
            req = requests.get(url, timeout=0.5)
            req.raise_for_status()
            text = req.text
        except requests.exceptions.HTTPError as e:
            logger.warning("Retry on HTTP Error: {}".format(e))
Exemple #42
0
    def __init__(self,
                 id,
                 base,
                 night,
                 configfile='C:/minerva-control/config/com.ini'):
        #ipdb.set_trace()
        #S set the id of self to the memory addresss, unique identifier

        self.lock = threading.Lock()
        self.id = id
        self.base_directory = base
        self.night = night

        #S Parses configfile into List based on id
        #? Not sure if this is how id is working, but makes sense. Need to look
        #? into *.ini files more.
        configObj = ConfigObj(configfile)

        #S Get the corresponding item/object (Typer??) from list of configObj,
        #S throws if not in configfile, printing to stdout. No log it
        try:
            config = configObj[self.id]
        except:
            print('ERROR accessing ', self.id, ".", self.id,
                  " was not found in the configuration file", configfile)
            return

        #? Not really sure what is going on, need to look into com.ini
        self.flowcontrol = str(config['Setup']['FLOWCONTROL'])
        #self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        #S Looks like more fun in com.ini, I think it makes sense though, adds
        #S allowable serial commands for communication with instrument.
        self.allowedCmds = config['Setup']['ALLOWEDCMDS']

        #S Is this short for 'Termination String'? I think so, for ending a
        #S command in serial comm. Note that \r is CR and \n is LF; picky
        #S instruments may care which terminator they get.
        if config['Setup']['TERMSTR'] == r"\r":
            self.termstr = "\r"
        elif config['Setup']['TERMSTR'] == r"\r\n":
            self.termstr = "\r\n"
        elif config['Setup']['TERMSTR'] == r"\n\r":
            self.termstr = "\n\r"
        elif config['Setup']['TERMSTR'] == r"\n":
            self.termstr = "\n"
        elif config['Setup']['TERMSTR'] == "":
            self.termstr = ""

        #S Gives self a Serial class
        self.ser = serial.Serial()
        #S Get the port, baud, expected bits, and the stopbit from configfile.
        self.ser.port = str(config['Setup']['PORT'])
        self.ser.baudrate = int(config['Setup']['BAUDRATE'])
        self.ser.databits = int(config['Setup']['DATABITS'])
        #? Parity commented out?
        #        self.ser.parity = str(config['Setup']['PARITY'])
        self.ser.stopbits = int(config['Setup']['STOPBITS'])

        #S Looks like log writing stuff, comes from configfile as well.
        self.logger_name = config['Setup']['LOGNAME']

        #S Need to read more into logging package, see what's going on here
        self.logger = utils.setup_logger(self.base_directory, self.night,
                                         self.logger_name)

        #S Ensure that the serial port is definitely closed.
        self.ser.close()
Exemple #43
0
#!/usr/bin/env python3
import logging

from Piloton import Piloton
from utils import setup_logger, set_logger_level

if __name__ == "__main__":
    # Set up root logger
    logger = setup_logger(logging_level=logging.DEBUG)
    set_logger_level("bleak", logging_level=logging.WARNING)
    set_logger_level("urllib3", logging_level=logging.WARNING)
    set_logger_level("asyncio", logging_level=logging.WARNING)

    # Set up Piloton
    piloton: Piloton = Piloton()

    # Run Piloton
    piloton.app()
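
Here setup_logger is paired with a set_logger_level helper used to quiet noisy third-party loggers. A plausible one-liner sketch of that helper follows; it is an assumption, not the project's actual implementation.

import logging


def set_logger_level(name, logging_level=logging.WARNING):
    # Assumed behaviour: adjust the threshold of the named library logger in place.
    logging.getLogger(name).setLevel(logging_level)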
Exemple #44
0
def main(argv):
    """
    Entry point for chart generation.
    """
    option_parser = optparse.OptionParser(usage=DEFAULT_USAGE_TEXT)
    option_parser.add_option("-c", "--config", dest="config",
                             default="config.cfg", help="Configuration file")
    option_parser.add_option("-v", "--verbose", dest="verbose",
                             action="store_true", default=False,
                             help="Show verbose output")

    option_parser.add_option("-x", "--x_axis", type="choice",
                             choices=[TIME],
                             help="Label for x-axis: time")

    option_parser.add_option("-X", "--x_output", dest="x_output",
                             help="Unit for x-axis. "
                             "time: daily, month, yearly or range 2000,2003. "
                             "Range for time is separated by comma to allow "
                             "daily range such as -X 2000-01-01,2003-08-20")

    option_parser.add_option("-y", "--y_axis", type="choice",
                             choices=[SALARY],
                             help="Label for y-axis: salary")

    option_parser.add_option("-Y", "--y_output", type="choice",
                             choices=[MEDIAN, TOTAL],
                             help="Unit for y-axis. salary: median or total")

    option_parser.add_option("-d", "--data_type", type="choice",
                             choices=[AGE, COMPANY, GENDER, JOBROLE, MANAGER],
                             help="Data type for plotted lines: "
                             "age, company, gender, jobrole or manager")

    option_parser.add_option("-D", "--data_output", dest="data_output",
                             help="Unit for plotted lines. "
                             "age: comma-separated list of ages or ranges, "
                             "company: comma-separated list of IDs, "
                             "gender: comma-separated list (male,female, "
                             "undefined or empty for all), "
                             "jobrole: comma-separated list of job roles, "
                             "manager: comma-separated list (true,false)")

    options, _ = option_parser.parse_args(argv)
    error = ""
    if not options.x_axis:
        error = "--x_axis is required"
    if not options.x_output:
        error = "--x_output is required"
    if not options.y_axis:
        error = "--y_axis is required"
    if not options.y_output:
        error = "--y_output is required"
    if not options.data_type:
        error = "--data_type is required"
    if not options.data_output:
        error = "--data_output is required"
    if error:
        sys.stderr.write("ERROR: {}\n".format(error))
        option_parser.print_help()
        return 1

    if not os.path.exists(options.config):
        sys.stderr.write("ERROR: {} does not exist\n".format(options.config))
        option_parser.print_help()
        return 1
    config = read_config(options.config)

    log_dir = config['general']['log_dir']
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    filename = os.path.join(log_dir, __file__.replace(".py", ".log"))
    setup_logger(filename, options.verbose)
    logging.debug("config={}".format(json.dumps(config, indent=2)))

    retcode = generate_chart(config, options)
    return retcode
Exemple #45
0
    help='maximum length of an episode (default: 100000)')
args = parser.parse_args()

setup_json = read_config(args.env_config)
env_conf = setup_json["Default"]
for i in setup_json.keys():
    if i in args.env:
        env_conf = setup_json[i]
torch.set_default_tensor_type('torch.FloatTensor')

saved_state_path = os.path.join(args.load_model_dir, args.env + '.model')
saved_state = torch.load(saved_state_path, map_location=lambda storage, loc: storage)
print('Loaded trained model from: {}'.format(saved_state_path))

log = {}
setup_logger('{}_mon_log'.format(args.env), r'{0}{1}_mon_log'.format(
    args.log_dir, args.env))
log['{}_mon_log'.format(args.env)] = logging.getLogger(
    '{}_mon_log'.format(args.env))

env = atari_env("{}".format(args.env), env_conf)
model = A3Clstm(env.observation_space.shape[0], env.action_space)

num_tests = 0
reward_total_sum = 0
player = Agent(model, env, args, state=None)
player.env = gym.wrappers.Monitor(player.env, "{}_monitor".format(args.env), force=True)
player.model.eval()
for i_episode in range(args.num_episodes):
    state = player.env.reset()
    player.state = torch.from_numpy(state).float()
    player.eps_len = 0
Exemple #46
0
    def __setup_logger(self, name):
        return utils.setup_logger(name, is_cobblerd=self.is_cobblerd, **self.log_settings)
Exemple #47
0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# by: pantuts

# DISCLAIMER: FOR TESTING AND EDUCATIONAL PURPOSES ONLY
# ANYTHING YOU DO WILL NOT AND SHOULD NOT AND CANNOT AFFECT THE AUTHOR, BE RESPONSIBLE!

import os
import time

from bs4 import BeautifulSoup
from datetime import datetime
from tld import get_fld
from utils import setup_logger, request, CSVUtils

logger = setup_logger(__name__)
csv_utils = CSVUtils()

SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0].title()
NAME = 'CardLife'
CSV_FNAME = NAME + '_' + datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d') + '.csv'

NOW = datetime.strftime(datetime.now(), '%Y-%m-%d')

MAIN_URL = 'https://www.cardlifeapp.com/saas-directory'


class Cardlife:
    def get_categories(self):
        logger.info('Getting categories.')
        resp = request(MAIN_URL)
Exemple #48
0
NUMBER_OF_ROWS = 10 ** 2
NUMBER_OF_COLS = 10
ROW_OFFSET = 2
COL_OFFSET = 2
VALUES_BUTTON_MAX_WIDTH = 50
VARIABLE_LIST_COLS_WIDTH = 50
VARIABLE_LIST_COL_N_WIDTH = 20
VARIABLE_LIST_COLS_COUNT = 6
N_ROLE = 0
NAME_ROLE = 1
TYPE_ROLE = 2
WIDTH_ROLE = 3
DECIMAL_ROLE = 4
LABEL_ROLE = 5

if __name__ == '__main__':
    sys.excepthook = handle_exception
    setup_logger("FS")
    app = QApplication(sys.argv)

    app.setStyle("Fusion")
    app.blockSignals(True)
    fill_random()
    app.blockSignals(False)

    window = MainWindow()
    window.show()

    exit_code = app.exec_()
    sys.exit(exit_code)
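
This snippet installs handle_exception as sys.excepthook before calling setup_logger("FS") but does not show the hook itself. The sketch below is a common pattern offered only as an assumption about its behaviour.

import logging
import sys


def handle_exception(exc_type, exc_value, exc_traceback):
    # Hypothetical excepthook: let Ctrl-C pass through, log any other uncaught error.
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    logging.getLogger("FS").critical(
        "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))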
Exemple #49
0
parser.add_argument('--hidden', type=int, default=128, help='Number of hidden units.')
parser.add_argument('--nb_heads', type=int, default=8, help='Number of head attentions.')
parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
parser.add_argument('--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.')
parser.add_argument('--patience', type=int, default=10000, help='Patience')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

if args.output_path and not os.path.exists(args.output_path):
    os.makedirs(args.output_path)

logger = setup_logger('baseline', args.output_path, 0)

# Load data
# adj, features, labels, idx_train, idx_val, idx_test = load_data()

# train_feature_node_path = os.path.join(args.train_path, 'node_features')
train_img_path = os.path.join(args.train_path, 'imgs')
train_edge_path = os.path.join(args.train_path, 'edge_adjs')
train_label_path = os.path.join(args.train_path, 'labels')
train_roi_path = os.path.join(args.train_path, 'roi')
# train_obj_path = os.path.join(args.train_path, 'mask_objs')

# val_feature_node_path = os.path.join(args.val_path, 'node_features')
Exemple #50
0
    return celery

celery = make_celery(app)

# DB
db = MongoEngine()
db.init_app(app)
db_wrapper = Database(db, cfg)

if __name__ == '__main__':

    # TODO - not this
    __builtin__.flock_app = app

    # Logging
    setup_logger(cfg["logging"]["file"], cfg["logging"]["level"])

    # Init Rollbar
    @app.before_first_request
    def init_rollbar():
        rollbar.init(
            cfg["rollbar"]["token"],
            cfg["rollbar"]["environment"],
            root=os.path.dirname(os.path.realpath(__file__)),
            allow_logging_basic_config=False
        )
        got_request_exception.connect(rollbar.contrib.flask.report_exception, app)

    class CustomRequest(Request):
        @property
        def rollbar_person(self):
Exemple #51
0
def main(sim_dir, proc_time, num_jobs, jobs_per, in_gb, out_gb, out_gb_dl,
         up_rate, down_rate, bid_ratio, instance_type, av_zone, product,
         csv_file=None):
    '''
    Function to calculate spot instance run statistics based on job
    submission parameters; this function will save the statistics and
    specific spot history in csv dataframes to execution directory

    Parameters
    ----------
    sim_dir : string
        base directory where to create the availability zone folders
        for storing the simulation results
    proc_time : float
        the number of minutes a single job of interest takes to run
    num_jobs : integer
        total number of jobs to run to complete job submission
    jobs_per : integer
        the number of jobs to run per node
    in_gb : float
        the total amount of input data for a particular job (in GB)
    out_gb : float
        the total amount of output data from a particular job (in GB)
    out_gb_dl : float
        the total amount of output data to download from EC2 (in GB)
    up_rate : float
        the average upload rate to transfer data to EC2 (in Mb/s)
    down_rate : float
        the average download rate to transfer data from EC2 (in Mb/s)
    bid_ratio : float
        the ratio to average spot history price to set the bid price to
    instance_type : string
        type of instance to run the jobs on and to get spot history for
    av_zone : string
        the AWS EC2 availability zone (sub-region) to get spot history
        from
    product : string
        the type of operating system product to get spot history for
    csv_file : string (optional), default is None
        the filepath to a csv dataframe to get spot history from;
        if not specified, the function will just get the most recent 90
        days worth of spot price history

    Returns
    -------
    spot_history : pd.DataFrame object
        in addition to saving this as './spot_history.csv' the
        dataframe can also be returned as an object in memory
    stat_df : pd.DataFrame object
        in addition to saving this as './<info>_stats.csv' the
        dataframe can also be returned as an object in memory
    '''

    # Import packages
    import dateutil
    import logging
    import numpy as np
    import os
    import pandas as pd
    import yaml

    # Import local packages
    import utils
    from record_spot_price import return_spot_history

    # Init variables
    proc_time *= 60.0
    num_nodes = min(np.ceil(float(num_jobs)/jobs_per), 20)

    # Init simulation market results dataframe
    sim_df_cols = ['start_time', 'spot_hist_csv', 'proc_time', 'num_datasets',
                   'jobs_per_node', 'num_jobs_iter', 'bid_ratio', 'bid_price',
                   'median_history', 'mean_history', 'stdev_history',
                   'compute_time', 'wait_time', 'per_node_cost',
                   'num_interrupts', 'first_iter_time']
    sim_df = pd.DataFrame(columns=sim_df_cols)

    # Init full run stats data frame
    stat_df_cols = ['Total cost', 'Instance cost', 'Storage cost', 'Transfer cost',
                    'Total time', 'Run time', 'Wait time',
                    'Upload time', 'Download time']
    stat_df = pd.DataFrame(columns=stat_df_cols)

    # Set up logger
    base_dir = os.path.join(sim_dir, av_zone)
    if not os.path.exists(base_dir):
        try:
            os.makedirs(base_dir)
        except OSError as exc:
            print 'Found av zone directory %s, continuing...' % av_zone
    log_path = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid.log' % \
                            (instance_type, num_jobs, bid_ratio))
    stat_log = utils.setup_logger('stat_log', log_path, logging.INFO, to_screen=True)

    # Check to see if simulation was already run (sim csv file exists)
    sim_csv = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_sim.csv' % \
                           (instance_type, num_jobs, bid_ratio))
    if os.path.exists(sim_csv):
        stat_log.info('Simulation file %s already exists, skipping...' % sim_csv)
        return

    # Calculate number of iterations given run configuration
    # Round up and assume that we're waiting for all jobs to finish
    # before terminating nodes
    num_iter = np.ceil(num_jobs/float((jobs_per*num_nodes)))
    stat_log.info('With %d jobs, %d nodes, and %d jobs running per node...\n' \
                  'job iterations: %d' % (num_jobs, num_nodes, jobs_per, num_iter))

    # Get spot price history, if we're getting it from a csv dataframe
    if csv_file:
        # Parse dataframe to form history
        spot_history = spothistory_from_dataframe(csv_file, instance_type,
                                                  product, av_zone)
        # Get rid of any duplicated timestamps
        spot_history = spot_history.groupby(spot_history.index).first()

    # Otherwise, just grab latest 90 days
    else:
        sh_list = return_spot_history(None, instance_type, product, av_zone)

        # Convert history into just timepoints and prices list of tuples
        timestamps = [dateutil.parser.parse(sh.timestamp) for sh in sh_list]
        prices = [sh.price for sh in sh_list]

        # Use pandas timeseries and sort in oldest -> newest
        spot_history = pd.Series(prices, timestamps)
        spot_history = spot_history.sort_index()

        # Write spot history to disk
        sh_csv = os.path.join(os.getcwd(), 'spot_history.csv')
        spot_history.to_csv(sh_csv)

    # Get interpolated times per second (forward fill)
    interp_seq = pd.date_range(spot_history.index[0], spot_history.index[-1],
                               freq='S')
    interp_history = spot_history.reindex(interp_seq)
    interp_history = interp_history.fillna(method='ffill')

    # Init simulation time series
    sim_seq = pd.date_range(interp_seq[0], interp_seq[-1], freq='20T')
    sim_series = interp_history[sim_seq]

    # Init loop variables
    sim_idx = 0
    sim_length = len(sim_series)
    beg_time = spot_history.index[0]
    end_time = spot_history.index[-1]
    time_needed = num_iter*(proc_time)

    # Get bid price
    spot_history_avg = interp_history.mean()
    bid_price = bid_ratio*spot_history_avg
    stat_log.info('Spot history average is $%.3f, bid ratio of %.3fx sets ' \
                  'bid to $%.3f' % (spot_history_avg, bid_ratio, bid_price))

    # Iterate through the interpolated timeseries
    for start_time, start_price in sim_series.iteritems():
        # First see if there's enough time to run jobs
        time_window = (end_time-start_time).total_seconds()
        if time_needed > time_window:
            stat_log.info('Total runtime exceeds time window, ending simulation...')
            break

        # Simulate running job and get stats from that start time
        try:
            run_time, wait_time, pernode_cost, num_interrupts, first_iter_time = \
                    simulate_market(start_time, spot_history, interp_history,
                                    proc_time, num_iter, bid_price)
        except Exception as exc:
            stat_log.info('Could not run full simulation because of:\n%s' % exc)
            continue

        # Write simulate market output to dataframe
        sim_df.loc[sim_idx] = [start_time, csv_file, proc_time, num_jobs,
                               jobs_per, num_iter, bid_ratio, bid_price,
                               np.median(spot_history), np.mean(spot_history),
                               np.std(spot_history), run_time, wait_time,
                               pernode_cost, num_interrupts, first_iter_time]

        # Get complete time and costs from spot market simulation parameters
        total_cost, instance_cost, stor_cost, xfer_cost, \
        total_time, run_time, wait_time, \
        xfer_up_time, xfer_down_time = \
                calc_ebs_model_costs(run_time, wait_time, pernode_cost,
                                     first_iter_time, num_jobs, num_nodes,
                                     jobs_per, av_zone, in_gb, out_gb,
                                     out_gb_dl, up_rate, down_rate)

        # Add to output dataframe
        stat_df.loc[sim_idx] = [total_cost, instance_cost, stor_cost, xfer_cost,
                                total_time/60.0, run_time/60.0, wait_time/60.0,
                                xfer_up_time/60.0, xfer_down_time/60.0]

        # Print stats
        stat_log.info('Total cost: $%.3f' % total_cost)
        stat_log.info('Total time (minutes): %.3f' % (total_time/60.0))
        stat_log.info('run time (minutes): %.3f' % (run_time/60.0))
        stat_log.info('per-node cost: $%.3f' % pernode_cost)
        stat_log.info('number of interrupts: %d' % num_interrupts)
        stat_log.info('wait time (minutes): %.3f' % (wait_time/60.0))

        sim_idx += 1
        utils.print_loop_status(sim_idx, sim_length)

    # Add configuration parameters to dataframe
    sim_df['av_zone'] = av_zone
    sim_df['in_gb'] = in_gb
    sim_df['out_gb'] = out_gb
    sim_df['out_gb_dl'] = out_gb_dl
    sim_df['up_rate'] = up_rate
    sim_df['down_rate'] = down_rate

    # Write simulation dataframe to disk
    sim_df.to_csv(sim_csv)

    # Write stats dataframe to disk
    stat_csv = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_stats.csv' % \
                           (instance_type, num_jobs, bid_ratio))
    stat_df.to_csv(stat_csv)

    # Write parameters yaml to disk
    params_yml = os.path.join(base_dir, '%s_%d-jobs_%.3f-bid_params.yml' % \
                              (instance_type, num_jobs, bid_ratio))

    params = {'proc_time' : proc_time,
              'num_jobs' : num_jobs,
              'jobs_per' : jobs_per,
              'in_gb' : in_gb,
              'out_gb' : out_gb,
              'out_gb_dl' : out_gb_dl,
              'up_rate' : up_rate,
              'down_rate' : down_rate,
              'bid_ratio' : bid_ratio,
              'instance_type' : instance_type,
              'av_zone' : av_zone,
              'product' : product,
              'csv_file' : csv_file}

    with open(params_yml, 'w') as y_file:
        y_file.write(yaml.dump(params))

    # Give simulation-wide statistics
    interrupt_avg = sim_df['num_interrupts'].mean()
    time_avg = stat_df['Total time'].mean()
    cost_avg = stat_df['Total cost'].mean()

    # Print simulation statistics
    stat_log.info('\n' + 72*'-')
    stat_log.info('Submission of %d job iterations, ' \
                  'each takes %.3f mins to run:' % (num_iter, proc_time/60.0))
    stat_log.info('Average spot history price for %s in %s\n' \
                  'between %s and %s is: $%.3f' % \
                  (instance_type, av_zone, beg_time, end_time, spot_history_avg))
    stat_log.info('Spot ratio of %.3fx the average price set bid to $%.3f' % \
                  (bid_ratio, bid_price))
    stat_log.info('Average total time (mins): %f' % time_avg)
    stat_log.info('Average total cost: $%.3f' % cost_avg)
    stat_log.info('Average number of interruptions: %.3f' % interrupt_avg)
    stat_log.info(72*'-' + '\n')

    # Return dataframes
    return spot_history, sim_df, stat_df
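
For orientation, a hedged example of how main() might be invoked; every value below is illustrative and not taken from the original simulations.

if __name__ == '__main__':
    # Hypothetical invocation; paths, rates, and instance choices are made up.
    results = main(sim_dir='./sim_results',
                   proc_time=45.0,        # minutes per job
                   num_jobs=200,
                   jobs_per=4,
                   in_gb=2.0, out_gb=1.5, out_gb_dl=0.5,
                   up_rate=20.0, down_rate=50.0,   # Mb/s
                   bid_ratio=1.5,
                   instance_type='c3.xlarge',
                   av_zone='us-east-1a',
                   product='Linux/UNIX',
                   csv_file=None)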
Exemple #52
0
import random
import subprocess
from pathlib import Path
from tempfile import NamedTemporaryFile

import h5py
import librosa
import numpy as np
import torch
import torch.utils.data as data
import tqdm
from scipy.io import wavfile

import utils
from utils import mu_law

logger = utils.setup_logger(__name__, 'data.log')


def random_of_length(seq, length):
    limit = seq.size(0) - length
    if length < 1:
        # logging.warning("%d %s" % (length, path))
        return None

    start = random.randint(0, limit)
    end = start + length
    return seq[start:end]


class EncodedFilesDataset(data.Dataset):
    """
Exemple #53
0
__date__ = "30/07/2012"
__copyright__ = "Copyright 2012, Australia Indonesia Facility for " "Disaster Reduction"

import os
import sys
import logging
from urllib2 import URLError
from zipfile import BadZipfile

from ftp_client import FtpClient
from sftp_client import SFtpClient
from utils import setup_logger, data_dir, is_event_id
from shake_event import ShakeEvent

# Loading from package __init__ is not working in this context, so do it manually here.
setup_logger()
LOGGER = logging.getLogger("InaSAFE")


def process_event(event_id=None, locale="en"):
    """Launcher that actually runs the event processing.

    :param event_id: The event id to process. If None the latest event will
       be downloaded and processed.
    :type event_id: str

    :param locale: The locale that will be used. Default to en.
    :type locale: str
    """
    population_path = os.path.join(data_dir(), "exposure", "IDN_mosaic", "popmap10_all.tif")
Exemple #54
0
        optimizer = Adam(shared_model.parameters(), lr=args.lr)
        criterion = nn.BCELoss()
        dataset = np.load(
            os.path.join(Dataset_Dir,
                         '{}_all'.format(Tag_Name[Tag_Dict[args.tag]]),
                         "%s_all.npz" % Tag_Name[Tag_Dict[args.tag]]))
        targets = dataset["arr_0"]
        max_accuracy = 0.0

        while True:
            args.epoch += 1
            print('=====> Train at epoch %d, Learning rate %0.6f <=====' %
                  (args.epoch, args.lr))
            start_time = time.time()
            log = setup_logger(
                0, 'epoch%d' % args.epoch,
                os.path.join(args.log_dir, 'epoch%d_log.txt' % args.epoch))
            log.info('Train time ' + time.strftime(
                "%Hh %Mm %Ss", time.gmtime(time.time() - start_time)) + ', ' +
                     'Training started.')

            order = list(range(targets.shape[0]))
            random.shuffle(order)
            losses = 0
            correct_cnt = 0

            for i in range(targets.shape[0]):
                idx = order[i]
                if dataset["arr_%d" % (idx + 1)].shape[0] == 0:
                    continue
Exemple #55
0
def test(args, shared_model, env_conf):
    log = {}
    setup_logger('{}_log'.format(args.env),
                 r'{0}{1}_log'.format(args.log_dir, args.env))
    log['{}_log'.format(args.env)] = logging.getLogger('{}_log'.format(
        args.env))
    d_args = vars(args)
    for k in d_args.keys():
        log['{}_log'.format(args.env)].info('{0}: {1}'.format(k, d_args[k]))

    torch.manual_seed(args.seed)
    env = atari_env(args.env, env_conf)
    reward_sum = 0
    start_time = time.time()
    num_tests = 0
    reward_total_sum = 0
    player = Agent(None, env, args, None)
    player.model = A3Clstm(player.env.observation_space.shape[0],
                           player.env.action_space)
    player.state = player.env.reset()
    player.state = torch.from_numpy(player.state).float()
    player.model.eval()

    while True:
        if player.done:
            player.model.load_state_dict(shared_model.state_dict())

        player.action_test()
        reward_sum += player.reward

        if player.done:
            num_tests += 1
            player.current_life = 0
            reward_total_sum += reward_sum
            reward_mean = reward_total_sum / num_tests
            log['{}_log'.format(args.env)].info(
                "Time {0}, episode reward {1}, episode length {2}, reward mean {3:.4f}"
                .format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - start_time)),
                    reward_sum, player.eps_len, reward_mean))

            sendStatElastic({
                "score": reward_sum,
                'agent_name': 'pytorch-test',
                'game_name': 'a3c-pytorch-SpaceInvaders-v0',
                'episode': num_tests,
                'frame_count': 0,
                'episode_length': player.eps_len
            })

            if reward_sum > args.save_score_level:
                player.model.load_state_dict(shared_model.state_dict())
                state_to_save = player.model.state_dict()
                torch.save(state_to_save,
                           '{0}{1}.dat'.format(args.save_model_dir, args.env))

            reward_sum = 0
            player.eps_len = 0
            state = player.env.reset()
            time.sleep(60)
            player.state = torch.from_numpy(state).float()
Exemple #56
0
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file", default="", help="path to config file", type=str)
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    num_gpus = torch.cuda.device_count()
    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info('Running with config:\n{}'.format(cfg))
    if cfg.INPUT.SEPNORM.USE:
        train_dl, val_dl, num_query, num_classes = make_sepnorm_dataloader(cfg, num_gpus)
    elif cfg.DATASETS.EXEMPLAR.USE:
        train_dl, val_dl, num_query, num_classes,exemplar_dl = make_dataloader(cfg, num_gpus)
    else:
        train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)

    model = build_model(cfg, num_classes)
    loss = make_loss(cfg, num_classes)
    if cfg.SOLVER.CENTER_LOSS.USE == True:
        trainer = CenterTrainer(cfg, model, train_dl, val_dl,
                      loss, num_query, num_gpus)
    else:
        if cfg.SOLVER.MIXUP.USE:
            trainer = NegMixupTrainer(cfg, model, train_dl, val_dl,
                              loss, num_query, num_gpus)
        elif cfg.DATASETS.EXEMPLAR.USE:
            if cfg.DATASETS.EXEMPLAR.MEMORY.USE:
                trainer = ExemplarMemoryTrainer(cfg, model, train_dl, val_dl,exemplar_dl,
                                  loss, num_query, num_gpus)
            else:
                trainer = UIRLTrainer(cfg, model, train_dl, val_dl,exemplar_dl,
                                  loss, num_query, num_gpus)
        elif cfg.DATASETS.HIST_LABEL.USE:
            trainer = HistLabelTrainer(cfg, model, train_dl, val_dl,
                    loss, num_query, num_gpus)
        else:
            trainer = BaseTrainer(cfg, model, train_dl, val_dl,
                              loss, num_query, num_gpus)
    if cfg.INPUT.SEPNORM.USE:
        logger.info('train transform0: \n{}'.format(train_dl.dataset.transform0))
        logger.info('train transform1: \n{}'.format(train_dl.dataset.transform1))

        logger.info('valid transform0: \n{}'.format(val_dl.dataset.transform0))
        logger.info('valid transform1: \n{}'.format(val_dl.dataset.transform1))

    else:
        logger.info('train transform: \n{}'.format(train_dl.dataset.transform))
        logger.info('valid transform: \n{}'.format(val_dl.dataset.transform))
    logger.info(type(model))
    logger.info(loss)
    logger.info(trainer)
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch()
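
The training loop above only relies on three methods of the trainer objects. A stub sketch of that assumed interface, not the real BaseTrainer/CenterTrainer implementations:

class MinimalTrainer:
    # Hypothetical stand-in showing the interface the loop above depends on.
    def __init__(self, epochs, train_dl):
        self.epochs = epochs
        self.train_dl = train_dl

    def step(self, batch):
        # Forward/backward pass and optimizer update for one batch.
        pass

    def handle_new_batch(self):
        # Per-batch bookkeeping: logging, LR scheduling, iteration counters.
        pass

    def handle_new_epoch(self):
        # Per-epoch bookkeeping: validation, checkpointing.
        pass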
Exemple #57
0
    def __init__(self, profile, **kwargs):
        """Constructor

        Args:
            profile (string): AWS profile
            **kwargs: Multiple arguments

        Raises:
            TypeError: Description
        """

        self.log_level = kwargs.pop("log_level", logging.WARNING)
        self.boto_log_level = kwargs.pop("boto_log_level", logging.WARNING)

        # Setup logger
        self.logger = setup_logger(__name__, self.log_level, self.boto_log_level)

        # Get AWS Session
        self.session = boto3.Session(profile_name=profile)
        self.logger.info("AWS Session created")

        # Get AWS EC2 Resource
        self.ec2 = self.session.resource("ec2")
        self.logger.info("AWS EC2 resource created")

        # Get AWS EC2 Client
        self.ec2_client = self.ec2.meta.client
        self.logger.info("AWS EC2 client created")

        self.eni_mapping = kwargs.pop("eni_mappings", settings.ENI_MAPPING)

        self.cidr_suffix_ips_number_mapping = kwargs.pop(
            "cidr_suffix_ips_number_mapping",
            settings.CIDR_SUFFIX_IPS_NUMBER_MAPPING
        )

        self.tag_base_name = kwargs.pop("tag_base_name", settings.TAG_BASE_NAME)

        self.hvm_only_instance_types = kwargs.pop(
            "hvm_only_instance_types",
            settings.HVM_ONLY_INSTANCE_TYPES
        )

        if kwargs:
            raise TypeError("Unexpected **kwargs: %r" % kwargs)

        self.config = {
            "vpcs": {},
            "instances_groups": []
        }

        resources_params = {
            "ec2": self.ec2,
            "ec2_client": self.ec2_client,
            "tag_base_name": self.tag_base_name,
            "log_level": self.log_level,
            "boto_log_level": self.boto_log_level
        }

        self.vpcs = Vpcs(**resources_params)
        self.internet_gateways = InternetGateways(**resources_params)
        self.subnets = Subnets(**resources_params)
        self.security_groups = SecurityGroups(**resources_params)
        self.route_tables = RouteTables(**resources_params)
        self.network_acls = NetworkAcls(**resources_params)
        self.network_interfaces = NetworkInterfaces(**resources_params)
        self.instances = Instances(**resources_params)
Exemple #58
0
def test(args, shared_model, env_conf):
    ptitle('Test Agent')
    gpu_id = args.gpu_ids[-1]
    log = {}
    setup_logger('{}_log'.format(args.env),
                 r'{0}{1}_log'.format(args.log_dir, args.env))
    log['{}_log'.format(args.env)] = logging.getLogger(
        '{}_log'.format(args.env))
    d_args = vars(args)
    for k in d_args.keys():
        log['{}_log'.format(args.env)].info('{0}: {1}'.format(k, d_args[k]))

    torch.manual_seed(args.seed)
    if gpu_id >= 0:
        torch.cuda.manual_seed(args.seed)
    print("test proc:")
    env = AllowBacktracking(make_local_env(env_conf['game'], env_conf['level'], stack=False, scale_rew=False))
    print("test got env:", env.observation_space)
    reward_sum = 0
    start_time = time.time()
    num_tests = 0
    reward_total_sum = 0
    player = Agent(None, env, args, None)
    player.gpu_id = gpu_id
    player.model = A3Clstm(
        player.env.observation_space.shape[0], player.env.action_space)

    player.state = player.env.reset()
    player.eps_len += 2
    player.state = torch.from_numpy(player.state).float()
    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            player.model = player.model.cuda()
            player.state = player.state.cuda()
    # Reload the latest shared-model weights at the start of each test episode.
    flag = True
    max_score = 0
    while True:
        if flag:
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.model.load_state_dict(shared_model.state_dict())
            else:
                player.model.load_state_dict(shared_model.state_dict())
            player.model.eval()
            flag = False

        player.action_test()
        reward_sum += player.reward

        """
        if player.done and player.info['ale.lives'] > 0 and not player.max_length:
            state = player.env.reset()
            player.eps_len += 2
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()
        """
        if player.done or player.max_length:
            flag = True
            num_tests += 1
            reward_total_sum += reward_sum
            reward_mean = reward_total_sum / num_tests
            log['{}_log'.format(args.env)].info(
                "Time {0}, episode reward {1}, episode length {2}, reward mean {3:.4f}".
                format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - start_time)),
                    reward_sum, player.eps_len, reward_mean))

            if args.save_max and reward_sum >= max_score:
                max_score = reward_sum
                if gpu_id >= 0:
                    with torch.cuda.device(gpu_id):
                        state_to_save = player.model.state_dict()
                        torch.save(state_to_save, '{0}{1}.dat'.format(args.save_model_dir, args.env))
                else:
                    state_to_save = player.model.state_dict()
                    torch.save(state_to_save, '{0}{1}.dat'.format(args.save_model_dir, args.env))

            reward_sum = 0
            player.eps_len = 0
            state = player.env.reset()
            player.eps_len += 2
            time.sleep(10)
            player.state = torch.from_numpy(state).float()
            if gpu_id >= 0:
                with torch.cuda.device(gpu_id):
                    player.state = player.state.cuda()
Example #59
0
    parser.add_argument("outfile", help="Output File")
    parser.add_argument("start", help="Start Time", type=float)
    parser.add_argument("--debug", help="Debug", action="store_true")
    parser.add_argument("--output-prefix", help="File prefix in output",
                        default=".")

    return parser.parse_args()


if __name__ == "__main__":
    args = _get_args()
    session_dir = os.path.join(conf.get_opt("session_dir"), args.session)
    status_file = os.path.join(session_dir, args.volume,
                               "%s.status" % urllib.quote_plus(args.brick))
    status_file_pre = status_file + ".pre"
    mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True,
           logger=logger)
    mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
           exit_on_err=True)
    log_file = os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume,
                            "brickfind.log")
    setup_logger(logger, log_file, args.debug)

    # Record when the crawl started and persist that timestamp to the
    # ".pre" status file once the crawl completes.
    time_to_update = int(time.time())
    brickfind_crawl(args.brick, args)
    with open(status_file_pre, "w", buffering=0) as f:
        f.write(str(time_to_update))
    sys.exit(0)
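The GlusterFS-style snippets above call setup_logger with an existing logger object, a log file path, and a debug flag. A minimal sketch of what such a helper typically does, offered as an assumption rather than the project's actual implementation:

import logging

def setup_logger(logger, path, debug=False):
    # Sketch only: attach a file handler to an existing logger and
    # switch to DEBUG-level output when the --debug flag is set.
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    handler = logging.FileHandler(path)
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(levelname)s %(message)s"))
    logger.addHandler(handler)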
Example #60
0
from utils import setup_logger
from settings import run_folder

### Set all LOGGER_DISABLED entries to True to disable logging
### WARNING: the mcts log file grows large quite quickly

LOGGER_DISABLED = {
    'main': False,
    'memory': False,
    'tourney': False,
    'mcts': False,
    'model': False,
}


logger_mcts = setup_logger('logger_mcts', run_folder + 'logs/logger_mcts.log')
logger_mcts.disabled = LOGGER_DISABLED['mcts']

logger_main = setup_logger('logger_main', run_folder + 'logs/logger_main.log')
logger_main.disabled = LOGGER_DISABLED['main']

logger_tourney = setup_logger('logger_tourney', run_folder + 'logs/logger_tourney.log')
logger_tourney.disabled = LOGGER_DISABLED['tourney']

logger_memory = setup_logger('logger_memory', run_folder + 'logs/logger_memory.log')
logger_memory.disabled = LOGGER_DISABLED['memory']

logger_model = setup_logger('logger_model', run_folder + 'logs/logger_model.log')
logger_model.disabled = LOGGER_DISABLED['model']
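The calls in this last snippet expect setup_logger(name, path) to create and return a named logger that writes to the given file, unlike the in-place variant sketched after Example #59. A minimal compatible implementation, offered as a sketch rather than the project's actual utils.setup_logger:

import logging

def setup_logger(name, log_file, level=logging.INFO):
    """Create (or fetch) a named logger that appends to log_file."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:  # avoid stacking handlers on repeated calls
        handler = logging.FileHandler(log_file)
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
    return logger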