Example #1
def main(argv):
    """ The main method for this module.
    """
    logfile = os.path.join(os.getcwd(), 'log-mrlite-scheduler.txt')
    config_logging(logfile)
    start_time = time.time()
    logging.info('Job started at %s' % time.asctime())
    parser = MRLiteOptionParser()
    options, args = parser.parse_args(argv)
    scheduler = MRLiteJobScheduler(options)
    try:
        scheduler.start_jobs()
        scheduler.monitor_jobs()
        scheduler.quit_jobs()
        logging.info('Job finished at %s' % time.asctime())
        logging.info('Job run for %.3f seconds' % (time.time() - start_time))
    except KeyboardInterrupt:
        logging.info('Interrupted by CTRL-C')
        scheduler.kill_jobs()
        sys.exit(-1)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        mesg = ''.join(
            traceback.format_exception(exc_type, exc_value, exc_traceback))
        logging.info('Caught exception in mrlite.py')
        logging.info(mesg)
        scheduler.kill_jobs()
        sys.exit(-1)
Example #2
def main(argv):
    """ The main method for this moduler.
    """
    if len(argv) != 2:
        print('The config file must be specified.')
        sys.exit(1)

    logfile = os.path.join(os.getcwd(), 'brook-log-scheduler.txt')
    config_logging(logfile)
    start_time = time.time()
    logging.info('Job started at %s' % time.asctime())
    configparser = ConfigFileParser(argv[1])
    options = configparser.parser()
    logging.info('Parsed config file options at %s' % time.asctime())
    scheduler = BrookJobScheduler(options)
    logging.info('Scheduler initialized successfully at %s' % time.asctime())
    try:
        scheduler.start_jobs()
        scheduler.monitor_jobs()
        scheduler.quit_jobs()
        logging.info('Job finished at %s' % time.asctime())
        logging.info('Job run for %.3f seconds' % (time.time() - start_time))
    except KeyboardInterrupt:
        logging.info('Interrupted by CTRL-C')
        scheduler.kill_jobs()
        sys.exit(-1)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        mesg = ''.join(traceback.format_exception(exc_type, exc_value,
                                                  exc_traceback))
        logging.info('Caught exception in brook_scheduler.py')
        logging.info(mesg)
        scheduler.kill_jobs()
        sys.exit(-1)
Example #3
    def __init__(self, server, port, rank, dir):
        """ Establish communication with scheduler
        """
        self.options = None
        logfile = os.path.join(dir, 'log-mrlite-rank-%s.txt' % rank)
        config_logging(logfile)
        logging.debug('communicator %s started in %s:%s at %s' %
                      (rank, server, port, time.asctime()))
        sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sockobj.connect((server, port))
        sock = SocketWrapper(sockobj)
        sock.send('rank %s %s' % (rank, os.getpid()))
        pickled_options = sock.recv()
        options = pickle.loads(pickled_options)

        self.task_callback = {
            'start_mapper': None,
            'start_reducer': None,
            'status': self.report_status,
            'quit': self.quit,
            'exit': self.quit,
        }
        self.server = server
        self.rank = rank
        self.sock = sock
        self.options = options
        self.process = None
        if options.maponly_mode:
            self.worker = MapOnlyWorker(options, rank, sock)
        elif rank < options.num_map_worker:
            self.worker = MapWorker(options, rank, sock)
        else:
            self.worker = ReduceWorker(options, rank, sock)
Example #4
    def __init__(self, server, port, rank, dir):
        """ Establish communication with scheduler
        """
        self.options = None
        logfile = os.path.join(dir, 'log-mrlite-rank-%s.txt' % rank)
        config_logging(logfile)
        logging.debug('communicator %s started in %s:%s at %s' %
                      (rank, server, port, time.asctime()))
        sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sockobj.connect((server, port))
        sock = SocketWrapper(sockobj)
        sock.send('rank %s %s' % (rank, os.getpid()))
        pickled_options = sock.recv()
        options = pickle.loads(pickled_options)

        self.task_callback = {
            'start_mapper': None,
            'start_reducer': None,
            'status': self.report_status,
            'quit': self.quit,
            'exit': self.quit,
        }
        self.server = server
        self.rank = rank
        self.sock = sock
        self.options = options
        self.process = None
        if options.maponly_mode:
            self.worker = MapOnlyWorker(options, rank, sock)
        elif rank < options.num_map_worker:
            self.worker = MapWorker(options, rank, sock)
        else:
            self.worker = ReduceWorker(options, rank, sock)
Example #5
def main(argv):
    """ The main method for this module.
    """
    logfile = os.path.join(os.getcwd(), 'log-mrlite-scheduler.txt')
    config_logging(logfile)
    start_time = time.time()
    logging.info('Job started at %s' % time.asctime())
    parser = MRLiteOptionParser()
    options, args = parser.parse_args(argv)
    scheduler = MRLiteJobScheduler(options)
    try:
        scheduler.start_jobs()
        scheduler.monitor_jobs()
        scheduler.quit_jobs()
        logging.info('Job finished at %s' % time.asctime())
        logging.info('Job run for %.3f seconds' % (time.time() - start_time))
    except KeyboardInterrupt:
        logging.info('Interrupted by CTRL-C')
        scheduler.kill_jobs()
        sys.exit(-1)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        mesg = ''.join(traceback.format_exception(exc_type, exc_value,
                                                  exc_traceback))
        logging.info('Caught exception in mrlite.py')
        logging.info(mesg)
        scheduler.kill_jobs()
        sys.exit(-1)
Example #6
def main(argv):
    """ The main method for this moduler.
    """
    if len(argv) != 2:
        print('The config file must be specified.')
        sys.exit(1)

    logfile = os.path.join(os.getcwd(), 'brook-log-scheduler.txt')
    config_logging(logfile)
    start_time = time.time()
    logging.info('Job started at %s' % time.asctime())
    configparser = ConfigFileParser(argv[1])
    options = configparser.parser()
    logging.info('Parsed config file options at %s' % time.asctime())
    scheduler = BrookJobScheduler(options)
    logging.info('Scheduler initialized successfully at %s' % time.asctime())
    try:
        scheduler.start_jobs()
        scheduler.monitor_jobs()
        scheduler.quit_jobs()
        logging.info('Job finished at %s' % time.asctime())
        logging.info('Job run for %.3f seconds' % (time.time() - start_time))
    except KeyboardInterrupt:
        logging.info('Interrupted by CTRL-C')
        scheduler.kill_jobs()
        sys.exit(-1)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        mesg = ''.join(
            traceback.format_exception(exc_type, exc_value, exc_traceback))
        logging.info('Caught exception in brook_scheduler.py')
        logging.info(mesg)
        scheduler.kill_jobs()
        sys.exit(-1)
Example #7
def main(command_line_arguments):
    config_logging()

    parser = argparse.ArgumentParser(
        description="Wrapper around docker run that protects against zombie containers",
        epilog="")

    parser.add_argument(
        "-u",
        "--user",
        help="Username or UID (format: <name|uid>[:<group|gid>])",
        default=None)

    parser.add_argument("-v",
                        "--volume",
                        action='append',
                        type=_volume_mount,
                        help="Bind mount a volume",
                        default=[])

    parser.add_argument("--cap-add",
                        help="Add Linux capabilities",
                        action="append",
                        type=str,
                        default=[])

    parser.add_argument("--runtime",
                        help="Runtime to use for this container",
                        default=None)

    parser.add_argument("--name",
                        help="Assign a name to the container",
                        default=None)

    parser.add_argument("image", metavar="IMAGE")
    parser.add_argument("command", metavar="COMMAND")
    parser.add_argument("args", nargs='*', metavar="ARG")

    args = parser.parse_args(args=command_line_arguments)
    docker_client = SafeDockerClient()
    return docker_client.run(
        args.image, **{
            "command": " ".join([args.command] + args.args),
            "user": args.user,
            "runtime": args.runtime,
            "name": args.name,
            "volumes": reduce(lambda dct, v: {**dct, **v}, args.volume, {}),
            "cap_add": args.cap_add
        })
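Note on the -v handling above: the parsed volumes are merged into a single dict with reduce, which implies the _volume_mount converter (not shown on this page) returns a one-entry dict per mount. A minimal sketch of such a converter follows; the accepted "host:container[:mode]" format and the docker-py-style dict shape are assumptions for illustration, not the project's actual implementation.

import argparse

def _volume_mount(value):
    # Hypothetical argparse type: turn "host:container[:mode]" into the
    # {host: {"bind": container, "mode": mode}} dict shape that docker-py's
    # volumes argument accepts, so repeated -v options can be merged with reduce.
    parts = value.split(':')
    if len(parts) not in (2, 3):
        raise argparse.ArgumentTypeError(
            'expected host:container[:mode], got %r' % value)
    host, container = parts[0], parts[1]
    mode = parts[2] if len(parts) == 3 else 'rw'
    return {host: {'bind': container, 'mode': mode}}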
Example #8
def main(command_line_arguments):
    config_logging()

    parser = argparse.ArgumentParser(
        description="Safe docker login utility to avoid leaking passwords",
        epilog="")
    parser.add_argument("--secret-name",
                        help="Secret name",
                        type=str,
                        required=True)

    parser.add_argument("--secret-endpoint-url",
                        help="Endpoint Url",
                        type=str,
                        default=os.environ.get("DOCKERHUB_SECRET_ENDPOINT_URL",
                                               None))

    parser.add_argument("--secret-endpoint-region",
                        help="AWS Region",
                        type=str,
                        default=os.environ.get(
                            "DOCKERHUB_SECRET_ENDPOINT_REGION", None))

    args = parser.parse_args(args=command_line_arguments)

    if args.secret_endpoint_url is None:
        raise RuntimeError(
            "Could not determine secret-endpoint-url, please specify with --secret-endpoint-url"
        )

    if args.secret_endpoint_region is None:
        raise RuntimeError(
            "Could not determine secret-endpoint-region, please specify with --secret-endpoint-region"
        )

    try:
        login_dockerhub(args.secret_name, args.secret_endpoint_url,
                        args.secret_endpoint_region)
    except Exception as err:
        logging.exception(err)
        exit(1)
Example #9
    msgtype = parse_unsigned_byte(stream)
    if not msg_spec[msgtype]:
        raise UnsupportedPacketException(msgtype)
    logger.debug("%s trying to parse message type %x" % (side, msgtype))
    msg_parser = msg_spec[msgtype]
    msg = msg_parser.parse(stream)
    msg['raw_bytes'] = stream.packet_finished()
    return Message(msg)


if __name__ == "__main__":
    logging.basicConfig(level=logging.ERROR)
    (host, port, opts, pcfg) = parse_args()

    if opts.logfile:
        util.config_logging(opts.logfile)

    if opts.loglvl:
        logging.root.setLevel(getattr(logging, opts.loglvl.upper()))

    # Install signal handler.
    signal.signal(signal.SIGINT, sigint_handler)

    while True:
        cli_sock = wait_for_client(opts.locport)

        # Set up client/server man-in-the-middle.
        sleep(0.05)
        MinecraftSession(pcfg, cli_sock, host, port)

        # I/O event loop.
Example #10
#!/usr/bin/env python

import gzip
from pipeline import *
import util

config = util.read_config('import')
log = util.config_logging(config)

table = 'area.area'
columns = ('area', 'zip', 'po_name', 'geom')
filename = 'bayareadata.gz'
area = 'sfbayarea'
db = util.DB.from_config(config)

log.info('importing file %r to table %r' % (filename, table))

# compose import pipeline
cat(gzip.open(filename)) | skip(head=2, tail=2) | split(sep='|') |\
    transform([lambda r: area, 0, 1, 2]) |\
    join('\t') | load_data(db, table, columns=columns, clean=True)
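The import pipeline above is composed by chaining stages with the | operator. A minimal sketch of how such pipe-composable stages can be built with operator overloading follows; the Stage class and the simplified cat/split/join helpers are illustrative assumptions, not the actual pipeline module used here.

class Stage:
    """One pipeline step wrapping a function from iterable to iterable."""
    def __init__(self, func):
        self.func = func

    def __or__(self, other):
        # a | b builds a new stage that runs a, then feeds its output into b.
        return Stage(lambda items: other.func(self.func(items)))

    def run(self, items=()):
        return self.func(items)

def cat(lines):
    # Source stage: ignore upstream input and emit the given lines.
    return Stage(lambda _items: (line.rstrip('\n') for line in lines))

def split(sep='|'):
    return Stage(lambda items: (item.split(sep) for item in items))

def join(sep='\t'):
    return Stage(lambda items: (sep.join(fields) for fields in items))

# Example run: prints ['a\tb', 'c\td']
print(list((cat(['a|b', 'c|d']) | split(sep='|') | join(sep='\t')).run()))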
Example #11
    def __init__(self):
        self.config = read_config(self.source_name)
        self.log = config_logging(self.config).getLogger(self.source_name)
        self.db = DB.from_config(self.config)
Example #12
    venue=os.path.join(api_v1, 'venues/<id:int>'),
    venue_nearby=os.path.join(api_v1, 'venues/<id:int>/nearby'),
    categories=os.path.join(api_v1, 'categories'),
    category=os.path.join(api_v1, 'categories/<id:int>'),
    category_venues=os.path.join(api_v1, 'categories/<id:int>/venues'),
    zips=os.path.join(api_v1, 'zips'),
    zip=os.path.join(api_v1, 'zips/<zip>'),
    zip_venues=os.path.join(api_v1, 'zips/<zip>/venues'),
)


### setup


config = util.read_config("api")
log = util.config_logging(config).getLogger("server")
# connection pool
pool = psycopg2.pool.ThreadedConnectionPool(
    minconn=1,
    maxconn=int(config.max_connections or 10),
    dsn=config.db,
    cursor_factory=psycopg2.extras.RealDictCursor
)
# install plugin
install(bottle_pgpool.PgSQLPoolPlugin(pool))


###  helpers


def fq_url(*path, **subst):
Example #13
File: proxy.py Project: Palats/mc3p
    msgtype = parse_unsigned_byte(stream)
    if not msg_spec[msgtype]:
        raise UnsupportedPacketException(msgtype)
    logger.debug("%s trying to parse message type %x" % (side, msgtype))
    msg_parser = msg_spec[msgtype]
    msg = msg_parser.parse(stream)
    msg['raw_bytes'] = stream.packet_finished()
    return Message(msg)


if __name__ == "__main__":
    logging.basicConfig(level=logging.ERROR)
    (host, port, opts, pcfg) = parse_args()

    if opts.logfile:
        util.config_logging(opts.logfile)

    if opts.loglvl:
        logging.root.setLevel(getattr(logging, opts.loglvl.upper()))

    # Install signal handler.
    signal.signal(signal.SIGINT, sigint_handler)

    while True:
        cli_sock = wait_for_client(opts.locport)

        # Set up client/server man-in-the-middle.
        sleep(0.05)
        MinecraftSession(pcfg, cli_sock, host, port)

        # I/O event loop.
Example #14
#!/usr/bin/env python

import gzip
from pipeline import *
import util

config = util.read_config('import')
log = util.config_logging(config)

table = 'area.area'
columns = ('area', 'zip', 'po_name', 'geom')
filename = 'bayareadata.gz'
area = 'sfbayarea'
db = util.DB.from_config(config)

log.info('importing file %r to table %r' % (filename, table))

# compose import pipeline
cat(gzip.open(filename)) | skip(head=2, tail=2) | split(sep='|') |\
    transform([lambda r: area, 0, 1, 2]) |\
    join('\t') | load_data(db, table, columns=columns, clean=True)
Example #15
def main(args):
    util.config_logging()
    # Settings
    lr = args.lr
    beta1 = args.beta1
    batch_size = args.batch_size
    iterations = args.iterations
    snapshot = args.snapshot
    stepsize = args.stepsize
    display = args.display

    path_source_cnn = './output/source_cnn'
    output_dir = os.path.join('output', 'target_cnn')
    save_path = os.path.join(output_dir, 'target_cnn.ckpt')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Load source data
    train_mat = loadmat('./data/svhn/train_32x32.mat')
    train_images = train_mat['X'].transpose((3, 0, 1, 2))
    train_images = train_images.astype(np.float32) / 255.
    RGB2GRAY = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)
    train_images = np.sum(np.multiply(train_images, RGB2GRAY),
                          3,
                          keepdims=True)
    # Load target data
    target_images = util._read_images(
        './data/mnist/train-images-idx3-ubyte.gz')
    # Data generator
    idg = ImageDataGenerator()
    source_data_gen = idg.flow(train_images,
                               batch_size=batch_size,
                               shuffle=True)
    target_data_gen = idg.flow(target_images,
                               batch_size=batch_size,
                               shuffle=True)

    # Define graph
    nb_classes = 10
    tf.reset_default_graph()
    x_source = tf.placeholder(tf.float32, (None, 32, 32, 1))
    x_source_resized = tf.image.resize_images(x_source, [28, 28])
    x_target = tf.placeholder(tf.float32, (None, 28, 28, 1))

    feature_src = source_cnn(x_source_resized,
                             nb_classes=nb_classes,
                             trainable=False,
                             adapt=True)
    feature_target = target_cnn(x_target, nb_classes, trainable=True)
    d_logits_src = discriminator(feature_src)
    d_logits_target = discriminator(feature_target, reuse=True)

    # Loss: Discriminator
    d_loss_src = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_src, labels=tf.ones_like(d_logits_src)))
    d_loss_target = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_target, labels=tf.zeros_like(d_logits_target)))
    d_loss = d_loss_src + d_loss_target
    # Loss: target CNN
    target_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_logits_target, labels=tf.ones_like(d_logits_target)))

    t_vars = tf.trainable_variables()
    target_vars = [
        var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope='target_cnn')
    ]
    d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
    src_vars = [
        var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope='source_cnn')
    ]

    lr_var = tf.Variable(lr, name='learning_rate', trainable=False)
    optimizer = tf.train.AdamOptimizer(lr_var, beta1)
    target_train_op = optimizer.minimize(target_loss, var_list=target_vars)
    d_train_op = optimizer.minimize(d_loss, var_list=d_vars)

    # Train
    source_saver = tf.train.Saver(var_list=src_vars)
    target_saver = tf.train.Saver(var_list=target_vars)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    target_losses = deque(maxlen=10)
    d_losses = deque(maxlen=10)
    bar = tqdm(range(iterations))
    bar.set_description('(lr: {:.0e})'.format(lr))
    bar.refresh()
    losses = []

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        source_saver.restore(sess, tf.train.latest_checkpoint(path_source_cnn))
        for i in bar:
            batch_source = next(source_data_gen)
            batch_target = next(target_data_gen)

            target_loss_val, d_loss_val, _, _ = sess.run(
                [target_loss, d_loss, target_train_op, d_train_op],
                feed_dict={
                    x_source: batch_source,
                    x_target: batch_target
                })
            target_losses.append(target_loss_val)
            d_losses.append(d_loss_val)
            losses.append([target_loss_val, d_loss_val])
            if i % display == 0:
                logging.info(
                    '{:20} Target: {:5.4f} (avg: {:5.4f})'
                    '    Discriminator: {:5.4f} (avg: {:5.4f})'.format(
                        'Iteration {}:'.format(i), target_loss_val,
                        np.mean(target_losses), d_loss_val, np.mean(d_losses)))
            if stepsize is not None and (i + 1) % stepsize == 0:
                lr = sess.run(lr_var.assign(lr * 0.1))
                logging.info('Changed learning rate to {:.0e}'.format(lr))
                bar.set_description('(lr: {:.0e})'.format(lr))
            if (i + 1) % snapshot == 0:
                snapshot_path = target_saver.save(sess, save_path)
                logging.info('Saved snapshot to {}'.format(snapshot_path))

    # Save visualization of training losses
    losses = np.array(losses)
    plt.plot(losses.T[0], label='Target CNN Loss', alpha=0.5)
    plt.plot(losses.T[1], label='Discriminator Loss', alpha=0.5)
    plt.title('Training Losses')
    plt.legend()
    plt.savefig('./losses.png')
Example #16
    def parse_args(self):
        """
        Configure the job from command line arguments.
        """

        parser = argparse.ArgumentParser(description=self.description)
        parser.add_argument("-c",
                            "--config-file",
                            help="Config file locations",
                            action='append')
        parser.add_argument("-d", "--run-dir", nargs='?', help="Job run dir")
        parser.add_argument("-o",
                            "--out",
                            nargs='?',
                            help="File for component stdout")
        parser.add_argument("-e",
                            "--err",
                            nargs='?',
                            help="File for component stderr")
        parser.add_argument(
            "-l",
            "--log",
            nargs='?',
            help="File for logging output (default is print to terminal)")
        parser.add_argument("-L",
                            "--level",
                            nargs='?',
                            help="Global log level")
        parser.add_argument("-s",
                            "--job-steps",
                            type=int,
                            default=-1,
                            help="Job steps to run (single number)")
        parser.add_argument("-i",
                            "--job-id",
                            type=int,
                            help="Job ID from JSON job store",
                            default=None)
        parser.add_argument("script",
                            nargs='?',
                            help="Path or name of job script")
        parser.add_argument("params",
                            nargs='?',
                            help="Job param file in JSON format")

        # TODO: CL option to disable automatic copying of output files.
        #       The files should be symlinked if copying is disabled.
        # parser.add_argument("--no-copy-output-files", help="Disable copying of output files")
        #parser.add_argument('--feature', dest='feature', action='store_true')
        #parser.add_argument('--no-feature', dest='feature', action='store_false')
        #parser.set_defaults(feature=True)

        cl = parser.parse_args(self.args)

        if cl.run_dir:
            self.rundir = cl.run_dir

        if cl.level:
            num_level = getattr(logging, cl.level.upper(), None)
            if not isinstance(num_level, int):
                raise ValueError('Invalid log level: %s' % cl.level)
            logging.getLogger('hpsmc').setLevel(num_level)
            #print("Set log level of hpsmc: %s" % logging.getLevelName(logging.getLogger('hpsmc').getEffectiveLevel()))

        if cl.log:
            self.log_file = cl.log
            if not os.path.isabs(self.log_file):
                self.log_file = os.path.abspath(self.log_file)
            config_logging(stream=open(self.log_file, 'w'))

        if cl.out:
            self.out_file = cl.out
            if not os.path.isabs(self.out_file):
                self.out_file = os.path.abspath(self.out_file)
                logger.info('Changed stdout file to abs path: %s' %
                            self.out_file)

        if cl.err:
            self.err_file = cl.err
            if not os.path.isabs(self.err_file):
                self.err_file = os.path.abspath(self.err_file)
                logger.info('Changed stderr file to abs path: %s' %
                            self.err_file)

        if cl.config_file:
            self.config_files = list(map(os.path.abspath, cl.config_file))
        else:
            self.config_files = []

        self.job_steps = cl.job_steps

        if cl.script:
            self.script = cl.script
        else:
            raise Exception('Missing required script name or location.')

        # Params are actually optional as some job scripts might not need them.
        if cl.params:
            self.param_file = os.path.abspath(cl.params)
            params = {}
            if cl.job_id:
                # Load data from a job store containing multiple jobs.
                self.job_id = cl.job_id
                logger.info("Loading job with ID %d from job store '%s'" %
                            (self.job_id, self.param_file))
                jobstore = JobStore(self.param_file)
                if jobstore.has_job_id(self.job_id):
                    params = jobstore.get_job(self.job_id)
                else:
                    raise Exception(
                        "No job id %d was found in the job store '%s'" %
                        (self.job_id, self.param_file))
            else:
                # Load data from a JSON file with a single job definition.
                logger.info('Loading job parameters from file: %s' %
                            self.param_file)
                params = load_json_data(self.param_file)
                if not isinstance(params, dict):
                    raise Exception(
                        'Job ID must be provided when running from a job store.'
                    )

            self._load_params(params)
Example #17
    msgtype = parse_unsigned_byte(stream)
    if not msg_spec[msgtype]:
        raise UnsupportedPacketException(msgtype)
    logger.debug("%s trying to parse message type %x" % (side, msgtype))
    msg_parser = msg_spec[msgtype]
    msg = msg_parser.parse(stream)
    msg['raw_bytes'] = stream.packet_finished()
    return Message(msg)


if __name__ == "__main__":
    logging.basicConfig(level=logging.ERROR)
    (host, port, opts, pcfg) = parse_args()

    if opts.logfile:
        config_logging(opts.logfile)

    if opts.loglvl:
        logging.root.setLevel(getattr(logging, opts.loglvl.upper()))

    if opts.user:
        while True:
            password = getpass("Minecraft account password: "******"Authenticating with %s" % opts.user)
            if auth.check():
                break
            logger.error("Authentication failed")
        logger.debug("Credentials are valid")

    if opts.authenticate or opts.password_file:
Example #18
from util import config_logging

# Enable default logging config
config_logging()
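The examples on this page call config_logging with a log file path, a parsed config object, a stream, or no arguments at all, and the helper itself is never shown. A minimal sketch of one plausible file-or-stream variant is given below; it is an assumption for illustration, and each project's real util.config_logging may behave differently.

import logging
import sys

def config_logging(logfile=None, level=logging.INFO, stream=None):
    # Hypothetical helper: log to a file when a path is given, otherwise to a
    # stream (stderr by default), with one consistent format. Returning the
    # logging module lets callers chain .getLogger(...) as some examples do.
    handler = (logging.FileHandler(logfile) if logfile
               else logging.StreamHandler(stream or sys.stderr))
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(handler)
    return logging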
Example #19
File: api.py Project: kuulemart/tp
    venue=os.path.join(api_v1, "venues/<id:int>"),
    venue_nearby=os.path.join(api_v1, "venues/<id:int>/nearby"),
    categories=os.path.join(api_v1, "categories"),
    category=os.path.join(api_v1, "categories/<id:int>"),
    category_venues=os.path.join(api_v1, "categories/<id:int>/venues"),
    zips=os.path.join(api_v1, "zips"),
    zip=os.path.join(api_v1, "zips/<zip>"),
    zip_venues=os.path.join(api_v1, "zips/<zip>/venues"),
)


### setup


config = util.read_config("api")
log = util.config_logging(config).getLogger("server")
# connection pool
pool = psycopg2.pool.ThreadedConnectionPool(
    minconn=1,
    maxconn=int(config.max_connections or 10),
    dsn=config.db,
    cursor_factory=psycopg2.extras.RealDictCursor
)
# install plugin
install(bottle_pgpool.PgSQLPoolPlugin(pool))


###  helpers


def fq_url(*path, **subst):
    """
    Builds fully qualified url. Uses bottle request object for scheme and loc
    info and works only within request
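Since the listing breaks off here, a minimal sketch of what such a helper might look like is shown below; it assumes bottle's request.urlparts supplies the scheme and host and that keyword arguments fill <name> / <name:filter> route placeholders, which is an illustrative guess rather than the project's actual code.

import re
from bottle import request

def fq_url(*path, **subst):
    # Hypothetical sketch: prefix the joined path parts with the scheme://host
    # of the active request, then substitute route-style placeholders, e.g.
    # fq_url('venues/<id:int>', id=42) -> 'http://host/venues/42'.
    base = '%s://%s' % (request.urlparts.scheme, request.urlparts.netloc)
    url = '/'.join([base] + [str(p).strip('/') for p in path])
    for key, value in subst.items():
        url = re.sub(r'<%s(:[^>]*)?>' % re.escape(key), str(value), url)
    return url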
Example #20
        while not self.closed:
            try:
                self.handle_read()
            except EOFException:
                break
            gevent.sleep()
        self.close()
        self.handle_close()


if __name__ == "__main__":
    logging.basicConfig(level=logging.ERROR)
    (host, port, opts, pcfg) = parse_args()

    if opts.logfile:
        config_logging(opts.logfile)

    if opts.loglvl:
        logging.root.setLevel(getattr(logging, opts.loglvl.upper()))

    if opts.user:
        while True:
            password = getpass("Minecraft account password: "******"Authenticating with %s" % opts.user)
            if auth.valid():
                break
            logger.error("Authentication failed")
        logger.debug("Credentials are valid")

    if opts.authenticate or opts.password_file:
Example #21
    def __init__(self):
        self.config = read_config(self.source_name)
        self.log = config_logging(self.config).getLogger(self.source_name)
        self.db = DB.from_config(self.config)
Example #22
def main(args):
    util.config_logging()
    # Parameters
    lr = args.lr
    batch_size = args.batch_size
    iterations = args.iterations
    snapshot = args.snapshot
    stepsize = args.stepsize
    display = args.display
    output_dir = 'output/source_cnn/'
    output_dir_clf = 'output/classifier/'
    save_path = os.path.join(output_dir, 'source_cnn.ckpt')
    save_path_clf = os.path.join(output_dir_clf, 'classifier.ckpt')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if not os.path.exists(output_dir_clf):
        os.makedirs(output_dir_clf)

    # Load train data
    train_mat = loadmat('./data/svhn/train_32x32.mat')
    train_images = train_mat['X'].transpose((3, 0, 1, 2))
    train_images = train_images.astype(np.float32) / 255.
    train_labels = train_mat['y'].squeeze()
    train_labels[train_labels == 10] = 0
    RGB2GRAY = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)
    train_images = np.sum(np.multiply(train_images, RGB2GRAY),
                          3,
                          keepdims=True)
    # Data generator
    idg = ImageDataGenerator()
    train_data_gen = idg.flow(train_images,
                              train_labels,
                              batch_size=batch_size,
                              shuffle=True)

    # Define graph
    nb_classes = 10
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, (None, 32, 32, 1))
    x_resized = tf.image.resize_images(x, [28, 28])
    t = tf.placeholder(tf.int32, (None, ))
    logits = source_cnn(x_resized, nb_classes=nb_classes, trainable=True)
    loss = tf.losses.sparse_softmax_cross_entropy(t, logits)
    lr_var = tf.Variable(lr, name='learning_rate', trainable=False)
    # Use the learning-rate variable so the later lr_var.assign(...) calls
    # actually change the rate seen by the optimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate=lr_var)
    train_op = optimizer.minimize(loss)

    t_vars = tf.trainable_variables()
    source_cnn_vars = [
        var for var in t_vars if var.name.startswith('source_cnn')
    ]
    classifier_vars = [
        var for var in t_vars if var.name.startswith('classifier')
    ]

    # Train
    cnn_saver = tf.train.Saver(var_list=source_cnn_vars)
    if len(classifier_vars) > 0:
        clf_saver = tf.train.Saver(var_list=classifier_vars)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    losses = deque(maxlen=10)
    training_losses = []
    bar = tqdm(range(iterations))
    bar.set_description('(lr: {:.0e})'.format(lr))
    bar.refresh()

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i in bar:
            batch_images, batch_labels = next(train_data_gen)
            loss_val, _ = sess.run([loss, train_op],
                                   feed_dict={
                                       x: batch_images,
                                       t: batch_labels
                                   })
            losses.append(loss_val)
            training_losses.append(loss_val)
            if i % display == 0:
                logging.info('{:20} {:10.4f}     (avg: {:10.4f})'.format(
                    'Iteration {}:'.format(i), loss_val, np.mean(losses)))
            if stepsize is not None and (i + 1) % stepsize == 0:
                lr = sess.run(lr_var.assign(lr * 0.1))
                logging.info('Changed learning rate to {:.0e}'.format(lr))
                bar.set_description('(lr: {:.0e})'.format(lr))
            if (i + 1) % snapshot == 0:
                snapshot_path = cnn_saver.save(sess, save_path)
                if len(classifier_vars) > 0:
                    clf_saver.save(sess, save_path_clf)
                logging.info('Saved snapshot to {}'.format(snapshot_path))

    plt.plot(training_losses, label='Source CNN Loss')
    plt.title('Pre-Training Loss')
    plt.legend()
    plt.savefig('./pre_training_losses.png')