Example #1
  def __init__(self, conf_file=None, environment=None):
    self.log = utils.setupLogging()
    self.containers = {}
    self.templates = {}
    self.state = 'live'

    if environment:
      self.load(environment)      
    else:
      # If we didn't get an absolute path, look for the file relative to the script's directory.
      if not conf_file.startswith('/'):
        conf_file = os.path.join(os.path.dirname(sys.argv[0]), conf_file)

      with open(conf_file, 'r') as data:
        self.config = yaml.safe_load(data)

    # On load, order templates into the proper startup sequence      
    self.start_order = utils.order(self.config['templates'])
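The comment above says templates are ordered into a startup sequence. A minimal sketch of what utils.order might do, assuming each template declares its dependencies under a hypothetical 'requires' key (the real helper lives in utils):

def order(templates):
    """Depth-first topological sort of templates by their dependencies."""
    ordered, seen = [], set()

    def visit(name):
        if name in seen:
            return
        seen.add(name)
        for dep in templates.get(name, {}).get('requires', []):  # 'requires' is an assumption
            visit(dep)
        ordered.append(name)

    for name in templates:
        visit(name)
    return ordered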
Example #2
import argparse
import shutil
import sys
import os

import utils
from decoderState import DecoderState
from x86.decoder import X86Decoder
from strategy.linearSweep import LinearSweepDecoder
from strategy.recursiveDescent import RecursiveDescent

# Testers...
#from test import fromUnit
#from test import fromOnline   # not needed... blerg!
#from test import fromExample

utils.setupLogging()


def parseArgs():

    parser = argparse.ArgumentParser()
    #parser.add_argument("x", type=int, help="the base")
    #parser.add_argument("y", type=int, help="the exponent")

    parser.add_argument("-b",
                        "--binary",
                        nargs=1,
                        help="Disassemble the given binary file.")

    parser.add_argument(
        "-v",
        "--verbosity",
        action="count",
        default=0,
        help="Show verbosity. Add more -v's to show more detail")
Example #3
'''
Created on 22 Nov 2017

@author: martinr
'''

import logging
from utils import setupLogging, versionInfo, getAllConf, getInputData, sparkEnv

setupLogging()
sparkEnv()
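# Note: sparkEnv() is assumed to set up the Spark environment variables,
# which is why it must run before pyspark is imported below.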
from pyspark.sql import SparkSession  # @UnresolvedImport
from pyspark.sql.types import StructType, StructField, LongType, DateType, StringType, Row  # @UnresolvedImport

spark = SparkSession.builder.appName("SparkTest").master(
    "local[50]").getOrCreate()

logger = logging.getLogger(__name__)
logger.info("Starting....")
for s in versionInfo(spark).split('\n'):
    logger.info(s)
for s in getAllConf(spark).split('\n'):
    logger.info(s)

empSchema = StructType([
    StructField("emp_no", LongType(), False),
    StructField("birth_date", DateType(), False),
    StructField("first_name", StringType(), True),
    StructField("last_name", StringType(), True),
    StructField("gender", StringType(), True),
    StructField("hire_date", DateType(), False)
])
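As a usage sketch (not part of the original file), the schema can back a small DataFrame; the sample row below is made up:

import datetime

rows = [
    (1, datetime.date(1960, 1, 1), "Georgi", "Facello", "M",
     datetime.date(1986, 6, 26)),
]
emp_df = spark.createDataFrame(rows, schema=empSchema)
emp_df.printSchema()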
Example #4
"""
This script collects the premine (for which the regtest keys are publicly
known) into the current wallet, so that sufficient balance is available
for all testing.
"""

import utils

from xayagametest import premine

import argparse
import sys

desc = "Collects the regtest Xaya premine"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--xaya_rpc_url",
                    required=True,
                    help="JSON-RPC interface of regtest Xaya Core")
parser.add_argument("--address",
                    default="",
                    help="can be set to override the target address")
args = parser.parse_args()

logger = utils.setupLogging()
rpc = utils.connectRegtestRpc(args.xaya_rpc_url, logger)

addr = args.address
if addr == "":
    addr = None
premine.collect(rpc, addr, logger)
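A hypothetical invocation (script name, credentials, and port are placeholders, not from the source):

# python collect_premine.py --xaya_rpc_url http://user:password@localhost:<regtest-port>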
Example #5
        json_txt = json.dumps(out_labels)
        out_file = save_img_dir + "/real_output_labels.json"
        with open(out_file, 'w') as f:
            f.write(json_txt)

    def generate(self):
        """
        Intialize a generic detectnet data generator class. It finds the filenames for canvas and paste images, and
        labels, and splits them into train and validation spilts.
        """
        logging.info("Initializing image dataset generator ...")

        logging.info("Generating training images.")
        self.generateImages(self.canvas_img_files, self.paste_labels,
                            self.train_img_dir, self.train_label_dir)

        logging.info("Finished generating images.")


if __name__ == "__main__":
    np.random.seed(int(time.time()))

    utils.setupLogging('real_image_gen')

    args = processArgs()
    gen = RealImageDataGen(args)

    gen.initialize()

    gen.generate()
        """
        Intialize a generic detectnet data generator class. It finds the filenames for canvas and paste images, and
        labels, and splits them into train and validation spilts.
        """
        logging.info("Initializing image dataset generator ...")

        logging.info("Generating training images.")
        self.generateImages(self.canvas_train_img_files,
                            self.paste_train_img_files, self.train_img_dir,
                            self.train_label_dir)

        self.all_paste_files_used = False

        logging.info("Generating validation images.")
        self.generateImages(self.canvas_val_img_files,
                            self.paste_val_img_files, self.val_img_dir,
                            self.val_label_dir)

        logging.info("Finished generating images.")


if __name__ == "__main__":
    utils.setupLogging('detectnet_data_gen')

    args = processArgs()
    gen = DetectNetDataGenerator(args)

    gen.initialize()

    gen.generate()
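The generator above works from pre-split train/val file lists. A minimal sketch of such a split, assuming a simple shuffled fraction (the helper and ratio are illustrative, not the project's code):

import numpy as np

def splitTrainVal(files, train_frac=0.8):
    """Shuffle a file list and split it into train/validation subsets."""
    files = list(files)
    np.random.shuffle(files)
    n_train = int(len(files) * train_frac)
    return files[:n_train], files[n_train:]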
Example #7
    img[where[0], where[1]] = contour_img[where[0], where[1]]

    # utils.showAndWait('streamers', img)

    # detector = cv2.SimpleBlobDetector_create()
    # keypoints = detector.detect(img2)
    # im_with_keypoints = cv2.drawKeypoints(img2, keypoints, np.array([]), (0, 0, 255),
    #                                       cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # utils.showAndWait('streamers', im_with_keypoints)

    # return contour_img
    return img


if __name__ == "__main__":
    utils.setupLogging('analyze_shot')

    args = processArgs()

    if not os.path.exists(args.input_video):
        raise RuntimeError("input video does not exist: {}".format(args.input_video))

    # Create a VideoCapture object and read from input file
    cap = cv2.VideoCapture(args.input_video)

    # Check that the video file opened successfully
    if not cap.isOpened():
        raise RuntimeError("video file not opened.")

    out_cap = cv2.VideoCapture(0)
    out = cv2.VideoWriter(args.output_video, get_video_type(args.output_video), 25, get_dims(out_cap, res))
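A hedged sketch of the read/process/write loop that would typically follow (in the real script each frame would presumably be processed before writing):

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        out.write(frame)  # hypothetical: the original likely transforms frame first

    cap.release()
    out.release()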
Example #8
        out_file = save_img_dir + "/sim_output_labels.json"
        with open(out_file, 'w') as f:
            f.write(json_txt)

    def generate(self):
        """
        Intialize a generic detectnet data generator class. It finds the filenames for canvas and paste images, and
        labels, and splits them into train and validation spilts.
        """
        logging.info("Initializing image dataset generator ...")

        logging.info("Generating training images.")
        self.generateImages(self.canvas_img_files, self.paste_img_files,
                            self.paste_label_dir, self.train_img_dir,
                            self.train_label_dir)

        logging.info("Finished generating images.")


if __name__ == "__main__":
    np.random.seed(int(time.time()))

    utils.setupLogging('sim_image_gen')

    args = processArgs()
    gen = SimImageDataGen(args)

    gen.initialize()

    gen.generate()
Example #9
                                5:(final_mask.shape[1] - 5)]

        # utils.showAndWait('final_mask', final_mask)
        # cv2.imwrite(self.save_dir + '/final_mask.png', final_mask)

        # Save the new label image.
        label_out = self.save_dir + '/' + base_name + '_label.png'
        logging.info("saving label file {}".format(label_out))
        cv2.imwrite(label_out, final_mask)

    def generate(self):

        for label_file in self.label_files:
            # label_file = '/media/dcofer/Ubuntu_Data/drone_images/drones/30_a.png'
            logging.info("processing {}".format(label_file))
            orig_filename = os.path.basename(label_file)
            orig_basename = os.path.splitext(orig_filename)[0]

            self.generateLabelFile(label_file, orig_basename)


if __name__ == "__main__":
    utils.setupLogging('segment_data_gen')

    args = processArgs()
    gen = SimImageDataGen(args)

    gen.initialize()

    gen.generate()
Example #10
                        help='Data dir containing images.')
    parser.add_argument('--label_dir',
                        type=str,
                        required=True,
                        help='dir where labels are stored.')
    parser.add_argument('--output_file',
                        type=str,
                        required=True,
                        help='sloth json file that will be generated.')

    args, unknown = parser.parse_known_args()
    return args


if __name__ == "__main__":
    utils.setupLogging('yolo_to_sloth')

    args = processArgs()

    img_files = utils.findFilesOfType(args.image_dir,
                                      ['jpg', 'jpeg'])  # 'png',

    json_labels = []

    for img_file in img_files:
        orig_filename = os.path.basename(img_file)
        orig_basename = os.path.splitext(orig_filename)[0]
        label_file = args.label_dir + '/' + orig_basename + '.txt'

        labels = utils.loadYoloLabels(label_file)
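utils.loadYoloLabels is the project's helper; a minimal stand-in that parses the standard YOLO text format (one "class cx cy w h" line per object, coordinates normalized to [0, 1]) might look like this:

def loadYoloLabels(label_file):
    """Parse standard YOLO label lines into (class_id, cx, cy, w, h) tuples."""
    labels = []
    with open(label_file, 'r') as f:
        for line in f:
            parts = line.split()
            if len(parts) != 5:
                continue  # skip malformed lines
            cls = int(parts[0])
            cx, cy, w, h = (float(v) for v in parts[1:])
            labels.append((cls, cx, cy, w, h))
    return labels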
Example #11
def main():
    """
    Command line stuff...
    """

    try:
        utils.setupLogging("koan")
    except Exception:
        # most likely running RHEL3, where we don't need virt logging anyway
        pass

    p = opt_parse.OptionParser()
    p.add_option("-k", "--kopts",
                 dest="kopts_override",
                 help="append additional kernel options")
    p.add_option("-l", "--list",
                 dest="list_items",
                 help="lists remote items (EX: profiles, systems, or images)")
    p.add_option("-v", "--virt",
                 dest="is_virt",
                 action="store_true",
                 help="install new virtual guest")
    p.add_option("-V", "--virt-name",
                 dest="virt_name",
                 help="use this name for the virtual guest")
    p.add_option("-r", "--replace-self",
                 dest="is_replace",
                 action="store_true",
                 help="reinstall this host at next reboot")
    p.add_option("-D", "--display",
                 dest="is_display",
                 action="store_true",
                 help="display the configuration stored in cobbler for the given object")
    p.add_option("-p", "--profile",
                 dest="profile",
                 help="use this cobbler profile")
    p.add_option("-y", "--system",
                 dest="system",
                 help="use this cobbler system")
    p.add_option("-i", "--image",
                 dest="image",
                 help="use this cobbler image")
    p.add_option("-s", "--server",
                 dest="server",
                 help="attach to this cobbler server")
    p.add_option("-t", "--port",
                 dest="port",
                 help="cobbler xmlrpc port (default 25151)")
    p.add_option("-P", "--virt-path",
                 dest="virt_path",
                 help="override virt install location")  
    p.add_option("-T", "--virt-type",
                 dest="virt_type",
                 help="override virt install type")
    p.add_option("-B", "--virt-bridge",
                 dest="virt_bridge",
                 help="override virt bridge")
    p.add_option("-n", "--nogfx",
                 action="store_true", 
                 dest="no_gfx",
                 help="disable Xen graphics (xenpv,xenfv)")
    p.add_option("", "--add-reinstall-entry",
                 dest="add_reinstall_entry",
                 action="store_true",
                 help="when used with --replace-self, just add entry to grub, do not make it the default")
    p.add_option("-C", "--livecd",
                 dest="live_cd",
                 action="store_true",
                 help="used by the custom livecd only, not for humans")

    (options, args) = p.parse_args()

    if os.getuid() != 0:
        print("koan requires root access")
        return 3

    try:
        k = Koan()
        k.list_items          = options.list_items
        k.server              = options.server
        k.is_virt             = options.is_virt
        k.is_replace          = options.is_replace
        k.is_display          = options.is_display
        k.profile             = options.profile
        k.system              = options.system
        k.image               = options.image
        k.live_cd             = options.live_cd
        k.virt_path           = options.virt_path
        k.virt_type           = options.virt_type
        k.virt_bridge         = options.virt_bridge
        k.no_gfx              = options.no_gfx
        k.add_reinstall_entry = options.add_reinstall_entry
        k.kopts_override      = options.kopts_override

        if options.virt_name is not None:
            k.virt_name          = options.virt_name
        if options.port is not None:
            k.port              = options.port
        k.run()

    except Exception as e:
        (xa, xb, tb) = sys.exc_info()
        if hasattr(e, "from_koan"):
            print(str(e)[1:-1])  # nice exception, no traceback needed
        else:
            print(xa)
            print(xb)
            print("".join(traceback.format_list(traceback.extract_tb(tb))))
        return 1
Example #12
                        help='json file to use for training.')
    parser.add_argument('--val_json',
                        type=str,
                        required=True,
                        help='json file to use for val.')
    parser.add_argument('--image_dir',
                        type=str,
                        required=True,
                        help='directory where images are located.')

    args, unknown = parser.parse_known_args()
    return args


if __name__ == "__main__":
    utils.setupLogging('split_json_train_val')

    args = processArgs()

    train_labels = []
    val_labels = []

    with open(args.root_json, "r") as read_file:
        labels = json.load(read_file)

    if not labels:
        raise RuntimeError("Labels were empty.")

    with open(args.train_list, "r") as read_file:
        train_files = read_file.readlines()
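The snippet ends before the split itself; a hypothetical continuation that partitions the labels against train_files (the 'filename' key is an assumption about the JSON layout):

    train_set = {line.strip() for line in train_files}
    for label in labels:
        if label['filename'] in train_set:  # 'filename' key is an assumption
            train_labels.append(label)
        else:
            val_labels.append(label)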
Example #13
                                scaled_base_file))

            if min_file_area > 0:
                min_areas.append(min_area)

        logging.info("Finished val images.")

        avg_min_areas = np.average(min_areas)
        std_min_areas = np.std(min_areas)
        logging.info(
            "Average min area detected: {} +- {} from {} images.".format(
                avg_min_areas, std_min_areas, len(min_areas)))

        # json_txt = json.dumps(all_labels)
        # out_file = self.save_dir + "/real_scaled_labels.json"
        # with open(out_file, 'w') as f:
        #     f.write(json_txt)


if __name__ == "__main__":
    np.random.seed(int(time.time()))

    utils.setupLogging('analyze_scaled_imgs')

    args = processArgs()
    gen = AnalyzeScaledImages(args)

    gen.initialize()

    gen.generate()
Example #14
def main():
    
    # ====================================================================
    # setup logging and gpu
    # ====================================================================
    utils.setupLogging()
    utils.setupGPUenvironment()

    if FLAGS.exp == 'celebA':
        opts = configs.config_celebA
    elif FLAGS.exp == 'celebA_small':
        opts = configs.config_celebA_small
    elif FLAGS.exp == 'mnist':
        opts = configs.config_mnist
    elif FLAGS.exp == 'mnist_small':
        opts = configs.config_mnist_small
    elif FLAGS.exp == 'dsprites':
        opts = configs.config_dsprites
    elif FLAGS.exp == 'grassli':
        opts = configs.config_grassli
    elif FLAGS.exp == 'grassli_small':
        opts = configs.config_grassli_small
    elif FLAGS.exp == 'mog':
        opts = configs.config_mog
    else:
        assert False, 'Unknown experiment configuration'

    opts['mode'] = FLAGS.mode
    if opts['mode'] == 'test':
        assert FLAGS.checkpoint is not None, 'Checkpoint must be provided'
        opts['checkpoint'] = FLAGS.checkpoint

    if FLAGS.zdim is not None:
        opts['zdim'] = FLAGS.zdim
    if FLAGS.pz is not None:
        opts['pz'] = FLAGS.pz
    if FLAGS.lr is not None:
        opts['lr'] = FLAGS.lr
    if FLAGS.w_aef is not None:
        opts['w_aef'] = FLAGS.w_aef
    if FLAGS.z_test is not None:
        opts['z_test'] = FLAGS.z_test
    if FLAGS.lambda_schedule is not None:
        opts['lambda_schedule'] = FLAGS.lambda_schedule
    if FLAGS.work_dir is not None:
        opts['work_dir'] = FLAGS.work_dir
    if FLAGS.wae_lambda is not None:
        opts['lambda'] = FLAGS.wae_lambda
    if FLAGS.enc_noise is not None:
        opts['e_noise'] = FLAGS.enc_noise

    if opts['verbose']:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s')
    else:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
    utils.create_dir(opts['work_dir'])
    utils.create_dir(os.path.join(opts['work_dir'], 'checkpoints'))

    if opts['e_noise'] == 'gaussian' and opts['pz'] != 'normal':
        assert False, 'Gaussian encoders compatible only with Gaussian prior'

    # Dumping all the configs to the text file
    with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
        text.write('Parameters:\n')
        for key in opts:
            text.write('%s : %s\n' % (key, opts[key]))

    # Loading the dataset
    data = DataHandler(opts)
    assert data.num_points >= opts['batch_size'], 'Training set too small'

    if opts['mode'] == 'train':

        # Creating WAE model
        wae = WAE(opts, data.num_points)

        # Training WAE
        wae.train(data)

    elif opts['mode'] == 'test':

        # Do something else
        improved_wae.improved_sampling(opts)
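FLAGS is defined elsewhere in the file; a minimal sketch of how such flags are typically declared with TF1-style tf.app.flags (an assumption about this codebase, with names mirroring the attributes used above):

import tensorflow as tf

tf.app.flags.DEFINE_string('exp', 'mnist', 'experiment configuration to run')
tf.app.flags.DEFINE_string('mode', 'train', "either 'train' or 'test'")
tf.app.flags.DEFINE_string('checkpoint', None, 'checkpoint to restore in test mode')
tf.app.flags.DEFINE_integer('zdim', None, 'dimensionality of the latent space')
FLAGS = tf.app.flags.FLAGS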
Example #15
  lemma = word if (lemmas is None or len(lemmas) == 0) else lemmas[0]

  res = DICT.query(lemma)
  res = {} if res is None else res
  res['lemma'] = lemma
  res['query'] = word
  return res
  
@app.route('/api/ecdict/status')
def status():
  return json.dumps({'status': True}), 200, {'content-type': 'application/json'}

@app.route('/api/ecdict/query', methods=['GET', 'POST'])
def tts():
  if request.method == 'POST':
    words = request.json['words']
  else:
    words = request.args['words'].split(',')

  results = [query(word) for word in words]
  res = {}
  res['results'] = results
  res['ok'] = True
  return json.dumps(res), 200, {'content-type':'application/json'}

if __name__ == "__main__":
  env = os.environ.get("ENV", default="dev")
  setupLogging(logging.INFO)
  logging.info(f'use env {env}')
  app.run(host='0.0.0.0', debug = env != 'prod')
  logging.info(f'start the server done!')
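A hedged client-side example for the query endpoint (host and port assume Flask's development defaults on the machine running the app):

import requests

resp = requests.post('http://localhost:5000/api/ecdict/query',
                     json={'words': ['hello', 'world']})
print(resp.json()['results'])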
Example #16
import csv
import logging
import random

import settings
import utils
from nk import sim, model

runConfigs = [
#                [sim.SearchMethod.STEEPEST, 2, False],
#                [sim.SearchMethod.STEEPEST, 2, False],
#                [sim.SearchMethod.STEEPEST, 1, False],
#                [sim.SearchMethod.STEEPEST, 1, True],
#                [sim.SearchMethod.GREEDY, 2, True],
#                [sim.SearchMethod.STEEPEST, 2, False],
#                [sim.SearchMethod.GREEDY, 1, False],
                [sim.SearchMethod.RANDOMTHENSTEEPEST, 2, True]
            ]
random.seed(utils.getDefaultSeedObject())
utils.setupLogging()

results = []
resFile = open(settings.RESULTS_CSV_FILE, 'a')
resWriter = csv.writer(resFile)
header = ["N", "K", "Fitness", "SearchMethod", "Distance", "CumulativeDistance", "NumberOfLandscapes", "StdDevFitness", "AttemptedFlips", "AcceptedFlips"]
resWriter.writerow(header)
logger = logging.getLogger(__name__)
landscapes = settings.landscapes
for nVal in settings.nList:
    for kVal in settings.kList:
        if kVal <= nVal:
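            # In an NK landscape each locus can depend on at most N-1 others,
            # so clamp K to N-1 when K equals N.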
            if kVal == nVal:
                realK = kVal-1
            else:
                realK = kVal
Example #17
import argparse
import shutil
import sys
import os

import utils
from decoderState import DecoderState
from x86.decoder import X86Decoder
from strategy.linearSweep import LinearSweepDecoder
from strategy.recursiveDescent import RecursiveDescent

# Testers...
from test import fromUnit
#from test import fromOnline   # not needed... blerg!
from test import fromExample

utils.setupLogging()

def parseArgs():

    parser = argparse.ArgumentParser()
    #parser.add_argument("x", type=int, help="the base")
    #parser.add_argument("y", type=int, help="the exponent")

    parser.add_argument("-b", "--binary", nargs=1, help="Disassemble the given binary file.")

    parser.add_argument("-v", "--verbosity", action="count", default=0, help="Show verbosity. Add more -v's to show more detail")

    parser.add_argument("--recursive-descent", action="store_true", help="Use the recursive descent method. ")

    parser.add_argument("--linear-sweep", action="store_true", help="Use the linear sweep method. ")