Example #1
 def initUI(self):
     self.name_company = 'SPD'
     self.name_product = 'GanSta'
     self.stylesheet_filename = os.path.join(os.path.dirname(__file__),
                                             "qss/nodeeditor.qss")
     loadStylesheets(
         os.path.join(os.path.dirname(__file__), "qss/nodeeditor-dark.qss"),
         self.stylesheet_filename)
     self.empty_icon = QIcon(".")
     if DEBUG:
         print("Registered nodes:")
         pp(CALC_NODES)
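     # Use a tabbed MDI area as the central widget; each open document gets its own tab.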
     self.mdiArea = QMdiArea()
     self.mdiArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
     self.mdiArea.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
     self.mdiArea.setViewMode(QMdiArea.TabbedView)
     self.mdiArea.setDocumentMode(True)
     self.mdiArea.setTabsClosable(True)
     self.mdiArea.setTabsMovable(True)
     self.setCentralWidget(self.mdiArea)
     self.mdiArea.subWindowActivated.connect(self.updateMenus)
     self.windowMapper = QSignalMapper(self)
     self.windowMapper.mapped[QWidget].connect(self.setActiveSubWindow)
     self.createNodesDock()
     self.createOutputDock()
     self.createActions()
     self.createMenus()
     self.createToolBars()
     self.createStatusBar()
     self.updateMenus()
     self.readSettings()
     self.setWindowTitle("GanSta Alpha 1")
     self.emptyJson()
Example #2
def tcpr_peer(pkt, ip):
    tcp = ip.getlayer(TCP)
    con = get_connection_peer(ip.dst, ip.src, tcp.dport, tcp.sport)

    # This check applies only to messages generated in-house; it could be avoided by a better design.
    if con is None:
        con = get_old_connection(ip.dst, ip.src, tcp.sport)

    # A SYN with no matching connection starts a new incoming connection.
    if con is None and tcp.flags == "S":
        options = dict(tcp.options)
        sack = 1 if "SAckOK" in options else 0
        mss = options.get("MSS", None)
        wscale = options.get("WScale", None)
        con = Connection(local_ip=ip.dst,
                         remote_ip=ip.src,
                         local_port=tcp.dport,
                         remote_port=tcp.sport,
                         is_bind=True,
                         mss=mss,
                         ws=wscale,
                         sack=sack)
        con.register()
        pp("New Accept:", ip, con)

    pp("Peer:", ip, con)

    if con is not None:
        return con.pkt_incomming(pkt, ip, tcp)

    print("Did not find any connection")
    return _accept(pkt, ip)
Example #3
    def get_representative_address(self, address):
        parameter_dict = self.config_dict.copy()
        parameter_dict["address"] = address
        del parameter_dict["url"]
        response = requests.get(self.config_dict["url"], params=parameter_dict)
        logger.info("Requesting URL: %s", self.config_dict["url"])
        logger.info("Request Parameters:\n%s", pp(parameter_dict))
        try:
            response_dict = response.json()
            logger.info("Response from Google:\n%s", pp(response_dict))

            if response.status_code != 200:
                logger.error("Google Civic API didn't return success:\n%s",
                             pp(response_dict))
                exit(1)

            if "officials" in response_dict:
                # Using the last legislator of all the legislators returned
                official_dict = response_dict["officials"][-1]
                _address_dict = {
                    "address_%s" % k: v
                    for k, v in official_dict["address"][0].items()
                }
                _address_dict["name"] = official_dict["name"]
                address_obj = AddressObj.get_address_obj(**_address_dict)
                return address_obj
            else:
                logger.error("Civic API didn't return any legislator")
                exit(1)
        except ValueError:
            logger.error("Civic API: unable to decode response json")
            exit(1)
Example #4
 def _err(self, msg):
     print "Current game state:"
     utils.pp(self.game_state)
     print "\nCurrent player status:"
     utils.pp(self.player_status)
     print msg
     sys.exit(1)
Example #6
def ff_layer_init(options, params, prefix='ff_', nin=None, nout=None):
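    # params is a dict of TF variables keyed by pp(prefix, name); returns 0 if no layer sizes are given.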
    if nin is None and nout is None:
        return 0
    with tf.variable_scope(prefix):
        params[pp(prefix, 'W')] = tf.Variable(tf.truncated_normal([nin, nout]))
        params[pp(prefix, 'b')] = tf.Variable(tf.truncated_normal([
            nout,
        ]))
    return params
Example #7
def main():
    # environment variables
    S3_ENDPOINT = os.environ['S3_ENDPOINT']
    AWS_REGION = os.environ['AWS_REGION']
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    BUCKET_NAME = os.environ['BUCKET_NAME']
    STORAGE_PATH = os.environ['STORAGE_PATH']
    LAMBDA_ENDPOINT = os.environ['LAMBDA_ENDPOINT']

    # initialize s3 client
    session = boto3.session.Session()
    s3 = session.client(
        service_name='s3',
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        endpoint_url=S3_ENDPOINT,
    )

    # test config
    print('\n--- Loading config')
    config_file = sys.argv[1]
    with open(config_file) as json_data:
        config = json.load(json_data)
        utils.pp(config)

    # wait for services to come up
    print('\n--- Waiting for environment to be up ---')
    print('\n---- 1. Lambda ----')
    utils.waitForLambda(LAMBDA_ENDPOINT)
    print('\n---- 2. S3 (localstack) ----')
    utils.waitForS3(s3)

    # copy files to virtual S3 (localstack)
    print('\n\n--- Initialize storage ---')
    print('\n---- 1. Create bucket ----')
    s3.create_bucket(Bucket=BUCKET_NAME)
    print('\n---- 2. Copy files to bucket ----')
    for file in config['files']:
        print(file)
        local_path = "{0}/{1}".format(STORAGE_PATH, file)
        with open(local_path, 'rb') as body:
            s3.put_object(Bucket=BUCKET_NAME, Key=file, Body=body)

    # trigger the lambda for every file and validate elastic indices
    print('\n\n--- Lambda test ---')
    print('\n---- 1. Trigger PUT events ----')
    for file in config['files']:
        print(file)
        utils.executeLambda(BUCKET_NAME, file, AWS_REGION, LAMBDA_ENDPOINT)

    print('\n---- 2. Validate Tags on all images ----')
    for file in config['files']:
        print(file)
        utils.validateTags(s3, BUCKET_NAME, file, config['palette'])
Example #8
def main(_):
  pp(flags.FLAGS.__flags)

  if FLAGS.input_width is None:
    FLAGS.input_width = FLAGS.input_height
  if FLAGS.output_width is None:
    FLAGS.output_width = FLAGS.output_height

  if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)
  if not os.path.exists(FLAGS.sample_dir):
    os.makedirs(FLAGS.sample_dir)

  #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
  run_config = tf.ConfigProto()
  run_config.gpu_options.allow_growth = True

  with tf.Session(config=run_config) as sess:
    dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          y_dim=FLAGS.y_dim,
          z_dim=FLAGS.z_dim,
          dataset_name=FLAGS.dataset,
          data_dir=FLAGS.data_dir,
          data_list=FLAGS.data_list,
          input_fname_pattern=FLAGS.input_fname_pattern,
          crop=FLAGS.crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir,
          use_double_G_train=FLAGS.use_double_G_train,
          add_deconv=FLAGS.add_dense)

    show_all_variables()

    if FLAGS.train:
        train_dcgan(dcgan)

    if not FLAGS.train:
        if not dcgan.load(FLAGS.checkpoint_dir)[0]:
            raise Exception("[!] Train a model first, then run test mode")

    if FLAGS.test:
        test_dcgan(dcgan, sess)

    if FLAGS.generate:
        generated_file_pathes = generate_images(dcgan, sess)

    if FLAGS.generate_known_z:
        z_mean = np.loadtxt(FLAGS.known_z_path)
        generated_file_pathes = generate_images(dcgan, sess, z_mean)
Example #9
def cprint(strang, quick=False):
    global scr
    curses.endwin()
    try:
        pp(strang)
    except Exception:
        pp(repr(strang))
    if not quick:
        input('↵!')
    scr = prep_curses()
Example #10
    def update_tcp(self, pkt, ip, tcp, is_ongoing):
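        # Translate sequence/ack numbers by the recovery delta and rewrite the port for the packet's direction.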
        if is_ongoing:
            tcp.seq = (tcp.seq - self.delta) % (2**32)
            tcp.sport = self.initial_local_port

        else:
            self.local_seq = tcp.ack
            tcp.ack = (tcp.ack + self.delta) % (2**32)
            tcp.dport = self.local_port

        del tcp.chksum  # force the checksum to be recalculated when the packet is rebuilt
        pp("After update:", ip)
        rebuild_pkt(pkt, ip)
Example #11
def read_images(target_dir, one=False, is_train=True):
    items = []
    file_names = sorted(os.listdir(os.path.join(target_dir, 'y')))
    count = len(file_names)
    i = 0
    for file_name in file_names:
        i += 1
        base_name, ext_name = os.path.splitext(file_name)
        pp(f'loading {base_name} {i}/{count}')
        items.append(Item(target_dir, base_name, is_train))
        if one:
            break
    pp(f'All images in {target_dir} have been loaded.')
    print('')
    return items
Example #12
def get_config(FLAGS):
    if FLAGS.model == "rcmn":
        config = FLAGS
    elif FLAGS.model == "small":
        config = RcmnSmallConfig
    elif FLAGS.model == "large":
        config = RcmnLargeConfig
    else:
        raise ValueError(" [!] Invalid model: %s" % FLAGS.model)

    if FLAGS.model == "ccmn":
        pp(FLAGS.__flags)
    else:
        pp(class_vars(config))

    return config
Example #13
def get_config(FLAGS):
  if FLAGS.model == "rcmn":
    config = FLAGS
  elif FLAGS.model == "small":
    config = RcmnSmallConfig
  elif FLAGS.model == "large":
    config = RcmnLargeConfig
  else:
    raise ValueError(" [!] Invalid model: %s" % FLAGS.model)

  if FLAGS.model == "ccmn":
    pp(FLAGS.__flags)
  else:
    pp(class_vars(config))

  return config
Example #14
    def fetch_url(self, to_address, from_address, variable_dict):
        param_dict = defaultdict(dict)
        param_dict.update(self.config_dict)
        for variable in self.config_dict["html_variables"]:
            try:
                param_dict["merge_variables"][variable] = variable_dict[
                    variable]
            except KeyError:
                logger.error(
                    "Mismatch in html_variables specification in input and configuration file: %s",
                    variable)
                exit(1)

        del param_dict["html_variables"]
        logger.info("From Address: %s", from_address)
        logger.info("To Address: %s", to_address)
        logger.info("Parameters: \n%s", pp(param_dict))
        try:
            letter = lob.Letter.create(from_address=from_address._asdict(),
                                       to_address=to_address._asdict(),
                                       **param_dict)
            return letter.url
        except Exception as e:
            logger.error("Error: %s", e.message)
            exit(1)
Example #15
def train(input_file='data/train.en',
          save_to='model/nnlm.ckpt',
          save_freq=100,
          disp_freq=10,
          batch_size=60,
          hidden_num=256,
          win_size=5,
          word_dim=500,
          lrate=0.001,
          num_epochs=10,
          grad_clip=10):

    model_options = locals().copy()
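    # Capture all hyperparameters so they can be dumped to JSON next to the model.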
    # Prepare the training data
    print 'Load data...',
    data_loader = TextLoader(input_file, batch_size, win_size)
    vocab_size = data_loader.vocab_size
    model_options['vocab_size'] = vocab_size

    print 'Done.'
    # Define the graph
    print 'Build Graph...',
    g = Graph(model_options, is_training=True)
    g.build_graph()
    print 'Done.'
    # Optimizer
    print 'Start Optimizing...'

    with tf.Session(graph=g.graph) as sess:
        sv = tf.train.Saver()  # used to save the model

        tf.global_variables_initializer().run()
        for e in range(model_options['num_epochs']):
            data_loader.reset_batch_pointer()
            for b in range(data_loader.num_batches):
                start = time.time()
                x_, y_ = data_loader.next_batch()
                feed = {g.x: x_, g.y: y_}

                train_loss, _ = sess.run([g.loss, g.optimizer], feed)
                end = time.time()

                # Display results and save the model
                gs = sess.run(g.global_step)
                if gs % disp_freq == 0:
                    print(
                        "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                        .format(b, data_loader.num_batches, e, train_loss,
                                end - start))
                if gs % save_freq == 0:
                    sv.save(sess, save_to + str(gs))

            np.save('model/nnlm_word_embeddings',
                    g.params[pp('ff_input', 'embeddings')].eval())

    with open(save_to + '.json', 'w') as fp:
        json.dump(model_options, fp, indent=2)
    print 'Done.'
Example #16
 def __init__(self, key, **lobapi_config_dict):
     lob.api_key = key
     self.config_dict = lobapi_config_dict
     for k, v in lobapi_default_config_dict.items():
         if k not in self.config_dict:
             self.config_dict[k] = v
     logger.info(
         "Configuration after reading configurations and adding defaults:\n%s",
         pp(self.config_dict))
Example #17
def main():
    if len(argv) > 1:
        frame_paths = get_frame_paths(argv[1].replace('/', ''))
    else:
        frame_paths = get_frame_paths('ghost_sample_2_frames')
    try:
        scr = prep_curses()
        height, width = scr.getmaxyx()

        # Old cell values used to calculate the render mask
        # Filling with zeros will result in a full render on frame 0.
        # Actual pixel values are >= 256, which will never match 0.
        old_cells = numpy.zeros((height - 1, width))

        for frame_path in frame_paths:
            numpy_frame = cv2.imread(frame_path, cv2.IMREAD_COLOR)
            frame = resize_frame(numpy_frame,
                                 width,
                                 height - 1,
                                 half_height=False)

            numpy_cells = clamp_and_init(frame)
            render_mask = old_cells != numpy_cells

            for y, row in enumerate(numpy_cells):
                if render_mask[y].any():
                    for x, cell in enumerate(row):
                        if render_mask[y][x]:
                            scr.addstr(y, x, ' ', cell)
                    scr.refresh()
            old_cells = numpy_cells

    except Exception as e:
        scr.clear()
        curses.endwin()
        print('\n')
        pp(e.args)
        raise e
    finally:
        scr.clear()
        curses.endwin()
Example #18
def warnRestrictionSites(sequence,name,sites):
	sites = sites.split(",")
	rb = Restriction.RestrictionBatch(sites)

	#Get Bio.Seq object
	amb = IUPACAmbiguousDNA()
	tmpSeq = Seq(sequence,amb)

	#Search for sites
	res = rb.search(tmpSeq)
	
	#Sum hits
	totalSites = 0
	for v in res.values():
		totalSites += len(v)

	if totalSites > 0:
		print >>sys.stderr, "Warning: The following positions in '%s' will be masked from tiles due to incompatible restriction sites:" % (name)
		pp(res)
Example #19
def keypress():
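    # Interactive console loop: waits on stdin without blocking gevent and dispatches single-letter commands.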
    while True:
        try:
            gevent.socket.wait_read(sys.stdin.fileno())
            msg = sys.stdin.readline().strip()
            if msg == "r":
                print("Recovering")
                for connection in connections:
                    connection.recover()
                continue

            if msg == "c":
                print("Clearing")
                while connections:
                    connections.pop().reset()
                continue

            if msg == "l":
                for connection in connections:
                    ip_type = IP
                    addr = connection.remote_addr
                    if ip_address(addr).version == 6:
                        ip_type = IPv6

                    dummy_pkt = ip_type(src=connection.local_addr,
                                        dst=connection.remote_addr) / TCP(
                                            sport=connection.local_port,
                                            dport=connection.remote_port,
                                            ack=connection.remote_seq,
                                            seq=connection.local_seq)
                    pp("Connection:", dummy_pkt, connection)
                continue

            print('''Commands:
            r -- recover connections
            c -- clear connections
            l -- list connections''')
        except Exception as e:
            print(e)
Example #20
    def __init__(self, key, **civicapi_config_dict):
        self.config_dict = civicapi_config_dict

        if 'fields' in self.config_dict:
            logger.warning("Using a custom 'fields' value is not recommended!")
        for k, v in civicapi_default_config_dict.items():
            if k not in self.config_dict:
                self.config_dict[k] = v
        self.config_dict["key"] = key
        logger.info(
            "Configuration after reading configurations and adding defaults:\n%s",
            pp(self.config_dict))
        validate_config(self.config_dict, "civic-api")
Example #21
def main():
    if len(argv) > 1:
        frame_paths = get_frame_paths(argv[1].replace('/', ''))
    else:
        frame_paths = get_frame_paths('ghost_sample_2_frames')

    # Alright gents. Let's get some order in this chaos.
    # Turns out videos look like videos when the frames are in the right order.
    # Filenames need to follow the "<whatever>_f<frame number>.<extension>" rule.
    frame_paths.sort(key=lambda x: int(x.split('_f')[1].split('.png')[0]))
    try:
        scr = prep_curses()
        h, w = scr.getmaxyx()

        frames = tuple(
            resize_frame(cv2.imread(p, cv2.IMREAD_COLOR), w, h - 1)
            for p in frame_paths)
        clamped_cells = clamp(frames)
        color_map, centroids = kmeans(clamped_cells)
        # The attr(ibute) map takes colors and returns an attribute string that is passed to addstr()
        attr_map = init_colors(color_map, centroids)

        for frame in clamped_cells:
            for y, row in enumerate(frame):
                for x, cell in enumerate(row):
                    scr.addstr(y, x, ' ', attr_map[cell])
            scr.refresh()
            curses.napms(25)

    except Exception as e:
        scr.clear()
        curses.endwin()
        print('\n')
        pp(e.args)
        raise e
    finally:
        scr.clear()
        curses.endwin()
Example #22
def tcpr_application(pkt, ip):
    tcp = ip.getlayer(TCP)
    con = get_connection_application(ip.src, ip.dst, tcp.sport, tcp.dport)
    pp("Application:", ip, con)

    # recovery for connect (port switched)
    if con is None and tcp.flags == SYN:
        con = get_old_connection(ip.src, ip.dst, tcp.dport)

    # existing connection
    if con is not None:
        return con.pkt_outgoing(pkt, ip, tcp)

    if tcp.flags & SYN:
        # new connection:
        # should be a SYN for an outgoing connection,
        # or SYN + ACK for an incoming connection

        options = dict(tcp.options)
        sack = 1 if "SAckOK" in options else 0
        mss = options.get("MSS", None)
        wscale = options.get("WScale", None)
        con = Connection(local_ip=ip.src,
                         remote_ip=ip.dst,
                         local_port=tcp.sport,
                         remote_port=tcp.dport,
                         is_bind=False,
                         mss=mss,
                         ws=wscale,
                         sack=sack,
                         initial_seq=tcp.seq + 1)
        con.register()
        pp("New Connect:", ip, con)

    return _accept(pkt, ip)
Example #23
def send_local(msg, pkt, socket_type):
    pp("Local " + msg, pkt)
    send(pkt, iface="lo", socket=socket_type())
Example #24
# LOOP
print(f'Starting ({now_str()})')
iter_count = len(data_set) // BATCH_SIZE
while epoch < first_epoch + EPOCH_COUNT:
    iter_metrics = Metrics()
    lr = scheduler.get_lr()[0]
    for i, (inputs, labels) in enumerate(data_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs).to(device)
        loss = criterion(outputs, labels)
        coef = Coef.calc(outputs, labels)
        iter_metrics.append_loss(loss.item())
        iter_metrics.append_coef(coef)
        pp('epoch[{ep}]:{i}/{I} iou:{c.pjac:.4f} acc:{c.pdice:.4f} loss:{loss:.4f} lr:{lr:.4f} ({t})'.format(
            ep=epoch, i=i+1, I=iter_count, lr=lr, t=now_str(), loss=loss.item(), c=coef))
        loss.backward()
        optimizer.step()
    pp('epoch[{ep}]:Done. iou:{c.pjac:.4f} acc:{c.pdice:.4f} gsi:{c.gsensi:.4f} gsp:{c.gspec:.4f} tsi:{c.tsensi:.4f} tsp:{c.tspec:.4f} loss:{loss:.4f} lr:{lr:.4f} ({t})'.format(
        ep=epoch, t=now_str(), lr=lr, loss=iter_metrics.avg('losses'), c=iter_metrics.avg_coef()
        ))
    gc.collect()
    print()
    weight_path = os.path.join(DEST_DIR, f'{Model.__name__.lower()}_{epoch}.pt')
    weights = model.module.cpu().state_dict() if USE_MULTI_GPU else model.cpu().state_dict()
    metrics.append_coef(iter_metrics.avg_coef())
    metrics.append_loss(iter_metrics.avg_loss())
    store.set_states(weights, optimizer.state_dict(), metrics.state_dict())
    store.save(weight_path)
    print(f'save weights to {weight_path}')
    model = model.to(device)
Example #25
    model.load_state_dict(store.weights)
else:
    raise Exception('Weights are needed.')
if USE_MULTI_GPU:
    model = torch.nn.DataParallel(model)

input_img = cv2.imread(INPUT_PATH)

print('Start inference')
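# Tile the input so each piece fits the model, run inference tile by tile, and re-assemble each row.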
grid = split_maxsize(input_img, (SIZE, SIZE2))
output_img_rows = []
for y, row in enumerate(grid):
    output_img_tiles = []
    for x, img in enumerate(row):
        padded_input_img, original_dims = add_padding(img)
        pp(f'Processing {x},{y}/{len(row)-1},{len(grid)-1} size:{padded_input_img.shape} ({now_str()})')
        pre_process = Compose([
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        input_tensor = torch.unsqueeze(pre_process(padded_input_img).to(device), dim=0)
        with torch.no_grad():
            output_tensor = model(input_tensor)
        output_arr = output_tensor.data[0].cpu().numpy()
        output_arr = remove_padding(np.transpose(output_arr, (1, 2, 0)), original_dims)
        output_img_tiles.append(output_arr)
        gc.collect()
    output_img_rows.append(cv2.hconcat(output_img_tiles))

pp(f'Done process {INPUT_PATH}')
print('')
Example #26
def ff_layer(params, emb, options, prefix='ff_', activ=tf.nn.tanh):
    return activ(tf.matmul(emb, params[pp(prefix, 'W')]) +
                 params[pp(prefix, 'b')])
Example #27
def eva_module(module_ast, module_name, fpath):
    # TODO: __main__ module
    print "--> cur_module begin", module_name
    # if "gevent" in module_name:
    #     import pdb; pdb.set_trace() 

    global debug_mn
    debug_mn.append(module_name)
    
    module = Module(module_ast)
    module.module_name = module_name
    sys_modules[module_name] = module
    module.namespace = local_namespace = global_namespace = Namespace(module)
    module.namespace.global_namespace = global_namespace
    global_namespace.is_module = True
    global_namespace.module_name = module_name
    global_namespace.module_anko = module

    if fpath is not None:
        global_namespace.fpath = fpath

    # Load builtins
    if module_name == "__main__":
        builtin_init()

    for ast_node in module_ast.body:
        if print_lineno:
            print "~~~~~~", module_name, ast_node.lineno
        coverage_line(fpath, ast_node.lineno)
        # domain.connect(fpath, ast_node.lineno, local_namespace)
        eva_statement(ast_node, local_namespace)

    if module_name == "__main__":
        global qcalls
        while qcalls and 0:
            mi = len(qcalls)
            mq, qcalls = qcalls, []
            for i, function in enumerate(mq):
                try:
                    # print i, mi
                    if function.module_fpath.startswith(r"C:\Python27\Lib"):
                        continue
                    call_queue(function)
                except:
                    pass
        iii = 0

        from coverage import coverage_snapshot
        from coverage import coverage_compare
        snapshot = coverage_snapshot()
        import coverage
        coverage.prim = True
        for function in qscalls:
            if function.module_fpath.startswith(r"C:\Python27\Lib"):
                continue
            call_anko(function)
            iii += 1
        print iii, len(qscalls)
        coverage_compare(snapshot)
        # import pdb; pdb.set_trace() 
            
        pp(global_namespace.__dict__)
    #     import pdb; pdb.set_trace() 

    print "<-- cur_module end", module_name
    debug_mn.pop()
    return module
Example #28
 def get(self, prefix, attr_basename):
     extended_prefix = self.extend_prefix(prefix)
     attr_name = utils.pp(extended_prefix, attr_basename)
     attr = getattr(self, attr_name)
     return attr
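Note: pp is not the same helper in every example. In Examples #6, #15, #26, #28 and #29 it joins name fragments into parameter/attribute keys rather than pretty-printing. A minimal sketch of such a joiner, assuming it simply underscore-joins its arguments (an illustration, not the original implementation):

def pp(*parts):
    # Hypothetical sketch: join name fragments into one key,
    # e.g. pp('ff_', 'W') -> 'ff_W'.
    return '_'.join(str(p).strip('_') for p in parts if p)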
Example #29
 def set(self, prefix, attr_basename, value):
     extended_prefix = self.extend_prefix(prefix)
     attr_name = utils.pp(extended_prefix, attr_basename)
     setattr(self, attr_name, value)
Example #30
    exit(1)

if 'html_variables' in input_dict:
    pass
elif 'message' in input_dict:
    input_dict["html_variables"] = {"message": input_dict["message"]}
else:
    logger.error(
        "html_variables or message not defined in input."
        "Please specify 'message' for default html"
        "template or 'html_variables' as dictionary of custom variables (as per your html template)"
    )

# Validating Input
validate_input(input_dict)

logger.info("Configurations afte reading from config file\n%s",
            pp(config_dict))

# Creating look up address to use with Civic API
from_address = AddressObj.get_address_obj(**input_dict)
lookup_address = from_address.get_lookup_address()

logger.info("Lookup Address to use with Google Civic API: %s", lookup_address)
civicApi = CivicApi(config_dict["auth"]["civic-key"],
                    **config_dict["civic-api"])
to_address = civicApi.get_representative_address(lookup_address)
lob_api = LobApi(config_dict["auth"]["lob-key"], **config_dict["lob-api"])
url = lob_api.fetch_url(to_address, from_address, input_dict["html_variables"])
print url
Example #31
 def evaluate(self):
     """ This function would build the graph to evalute the log perplexity of the test data """
     weight, bias, emb = self.sess.run([self.sm_w_t, self.sm_b, self.emb])
     return utils.pp(weight, bias, emb, self.test_data)
Example #32
def send_remote(msg, pkt, socket_type):
    pp("Remote " + msg, pkt)
    send(pkt, socket=socket_type())
Example #33
            pre_process = Compose([
                ToTensor(),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])
            input_tensor = torch.unsqueeze(pre_process(input_arr).to(device), dim=0)
            with torch.no_grad():
                output_tensor = model(input_tensor)
            output_arr = output_tensor.data[0].cpu().numpy()
            output_arr = np.transpose(output_arr, (1, 2, 0))
            output_arr = remove_padding(output_arr, original_dims)
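            # Re-pack the un-padded prediction and label as NCHW tensors so Coef.calc can compare them on the device.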
            output_tensor = torch.unsqueeze(torch.from_numpy(output_arr).permute(2, 0, 1), dim=0).to(device)
            label_tensor = torch.unsqueeze(torch.from_numpy(label_arr).permute(2, 0, 1), dim=0).to(device)
            coef = Coef.calc(output_tensor, label_tensor)
            output_img_tiles.append(output_arr)
            metrics.append_coef(coef)
            pp(f'Process {item.name} {x},{y}/{len(row)-1},{len(splitted)-1} iou:{coef.pjac:.4f} acc:{coef.pdice:.4f} ({now_str()})')
            gc.collect()
        output_img_rows.append(cv2.hconcat(output_img_tiles))
    output_img = label_to_img(cv2.vconcat(output_img_rows), alpha=True)
    masked_img = overlay_transparent(item.x_raw, output_img) # TODO: overlay transparented mask
    os.makedirs(DEST_DIR, exist_ok=True)
    cv2.imwrite(os.path.join(DEST_DIR, f'{item.name}.jpg'), masked_img)
    m = train_metrics if item.is_train else val_metrics
    avg_coef = metrics.avg_coef()
    m.append_coef(avg_coef)
    report.append(item.name, avg_coef, 'train' if item.is_train else 'val')
    report.save()
    pp(f'{item.name}: {metrics.avg_coef().to_str()} ({now_str()})')
    print('')

all_metrics = Metrics()