Example #1
    def generatePuzzle(self, message, modulus=const.PUZZLE_MODULUS_LENGTH):
        """Generates a new time-lock puzzle by locking the given message and
        using the given modulus. The new puzzle is then returned."""

        assert (len(message) * 8) < const.PUZZLE_MODULUS_LENGTH

        if (modulus % 8) != 0:
            raise ValueError("Modulus must be divisible by 8.")

        puzzle = {}

        while True:
            # Generate random primes and add `n' (= p * q) to the puzzle.
            p = number.getPrime(modulus / 2)
            q = number.getPrime(modulus / 2)
            n = p * q
            puzzle["n"] = n

            # Use phi_n as a shortcut to ``encrypt'' the message.
            phi_n = (p - 1) * (q - 1)
            e = pow(2, self.t, phi_n)
            b = pow(self.a, e, n)
            Ck = (int(message.encode("hex"), 16) + b) % n
            puzzle["Ck"] = Ck

            # Make sure that the puzzle is always of the same size.
            if len(util.dump(puzzle["n"])) == \
                    len(util.dump(puzzle["Ck"])) == (modulus / 8):
                return puzzle
Example #2
def main():
    all_data = read_all(directory='../data/03_powertransform')
    dims = get_dims(all_data)
    dump(dims, '../data/07_dims/dims03.joblib')
    all_data = read_all(directory='../data/05_onehot')
    dims = get_dims(all_data)
    dump(dims, '../data/07_dims/dims05.joblib')
Example #3
def load_test():
    """
    Load dataset for testing.

    Returns
    -------
    X: numpy ndarray, shape: (num_of_enrollments, num_of_features)
    Rows of features.
    """
    pkl_path = util.cache_path('test_X')
    if os.path.exists(pkl_path):
        X = util.fetch(pkl_path)
    else:
        enroll_set = np.sort(util.load_enrollment_test()['enrollment_id'])
        # log = util.load_logs()
        # base_date = log['time'].max().to_datetime()
        base_date = datetime(2014, 8, 1, 22, 0, 47)
        X = None
        for f in MODELING['features']:
            X_ = f(enroll_set, base_date)
            if X is None:
                X = X_
            else:
                X = np.c_[X, X_]
        util.dump(X, pkl_path)
    return X
Example #4
    def generatePuzzle( self, message, modulus=const.PUZZLE_MODULUS_LENGTH):
        """Generates a new time-lock puzzle by locking the given message and
        using the given modulus. The new puzzle is then returned."""

        assert (len(message) * 8) < const.PUZZLE_MODULUS_LENGTH

        if (modulus % 8) != 0:
            raise ValueError("Modulus must be divisible by 8.")

        puzzle = {}

        while True:
            # Generate random primes and add `n' (= p * q) to the puzzle.
            p = number.getPrime(modulus / 2)
            q = number.getPrime(modulus / 2)
            n = p * q
            puzzle["n"] = n

            # Use phi_n as a shortcut to ``encrypt'' the message.
            phi_n = (p - 1) * (q - 1)
            e = pow(2, self.t, phi_n)
            b = pow(self.a, e, n)
            Ck = (int(message.encode("hex"), 16) + b) % n
            puzzle["Ck"] = Ck

            # Make sure that the puzzle is always of the same size.
            if len(util.dump(puzzle["n"])) == \
                    len(util.dump(puzzle["Ck"])) == (modulus / 8):
                return puzzle
Example #5
    def import_mesh(self, fpath, scale=1., object_world_matrix=None):
        ext = os.path.splitext(fpath)[-1]
        if ext == '.obj':
            bpy.ops.import_scene.obj(filepath=str(fpath), split_mode='OFF')
        elif ext == '.ply':
            bpy.ops.import_mesh.ply(filepath=str(fpath))

        obj = bpy.context.selected_objects[0]
        util.dump(bpy.context.selected_objects)  # print the object attributes

        if object_world_matrix is not None:
            obj.matrix_world = object_world_matrix

        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0., 0., 0.)  # center the bounding box!

        if scale != 1.:
            bpy.ops.transform.resize(value=(scale, scale, scale))

        # Disable transparency & specularities
        M = bpy.data.materials
        for i in range(len(M)):
            M[i].use_transparency = False
            M[i].specular_intensity = 0.0

        # Disable texture interpolation
        T = bpy.data.textures
        for i in range(len(T)):
            try:
                T[i].use_interpolation = False
                T[i].use_mipmap = False
                T[i].use_filter_size_min = True
                T[i].filter_type = "BOX"
            except:
                continue
Example #6
    def _set_subdir(self, subdir):
        subdir = util.forceutf8(subdir)
        if subdir:
            subdir = '/'.join(p for p in subdir.split('/') if p)

        self.__subdir = None
        subdirfile = os.path.join(self.metapath, 'subdir')

        if os.path.isfile(subdirfile):
            stored_subdir = util.load(subdirfile)
            assert stored_subdir is not None
            if subdir is None:
                self.__subdir = stored_subdir
            elif subdir and subdir != stored_subdir:
                raise hgerror.Abort(
                    'unable to work on a different path in the '
                    'repository')
            else:
                self.__subdir = subdir
        elif subdir is not None:
            util.dump(subdir, subdirfile)
            self.__subdir = subdir
        elif not self._skiperror:
            raise hgerror.Abort("hgsubversion metadata unavailable; "
                                "please run 'hg svn rebuildmeta'")
Example #8
def run_experiment(name, optimizer, nonlin, kfac, iters, lr):
    losses, vlosses = kfac_lib.train(optimizer=optimizer,
                                     nonlin=nonlin,
                                     kfac=kfac,
                                     iters=iters,
                                     lr=lr)
    u.dump(losses, 'losses_' + name + '.csv', True)
    u.dump(vlosses, 'vlosses_' + name + '.csv', True)
Example #9
    def layout(self):
        # this method can't determine the layout, but it needs to be
        # resolved into something other than auto before this ever
        # gets called
        if not self._layout or self._layout == 'auto':
            self._layout = layouts.detect.layout_from_config(self)
            util.dump(self._layout, self.layout_file)
        return self._layout
Example #10
def run(data, args):
	untld = []
	for d in data:
		if not args.lang in d:
			d[args.lang] = ""
			untld.append(d)

	dump(untld, args.outfile)
Example #11
def run(data, args):
	compiled = {}
	for item in data:
		if not args.lang in item:
			continue
		crc = str(crc32(item['orig'].encode('utf-8')) & 0xffffffff)
		compiled[crc] = item[args.lang]

	dump(compiled, args.patchfile, pretty=False)
Example #12
    def onData(self, interest, data):
        """
        FileSync:
            To be written (TBW)

        """
        # TODO: Verify packet
        self.keyChain.verifyData(data, self.onVerified, self.onVerifyFailed)

        util.dump("Got data packet with name", data.getName().toUri())
        util.dumpData(data)

        content = fileSyncBuf_pb2.FileSync()
        content.ParseFromString(data.getContent().toRawStr())
        print("Type: " + str(content.dataType) + ", data: " + content.data)

        if self.getNowMilliseconds() - content.timestamp * 1000.0 < 120000.0:
            # Use getattr because "from" is a reserved keyword.
            name = getattr(content, "from")
            prefix = data.getName().getPrefix(-2).toUri()
            sessionNo = int(data.getName().get(-2).toEscapedString())
            sequenceNo = int(data.getName().get(-1).toEscapedString())
            nameAndSession = name + str(sessionNo)

            l = 0
            # Update roster.
            while l < len(self.roster):
                entry = self.roster[l]
                tempName = entry[0:len(entry) - 10]
                tempSessionNo = int(entry[len(entry) - 10:])
                if (name != tempName and content.dataType !=
                        fileSyncBuf_pb2.FileSync.UNSUBSCRIBE):
                    l += 1
                else:
                    if name == tempName and sessionNo > tempSessionNo:
                        self.roster[l] = nameAndSession
                    break

            if l == len(self.roster):
                self.roster.append(nameAndSession)
                print(name + ": Subscribe")

            # Use getattr because "from" is a reserved keyword.
            if (content.dataType == fileSyncBuf_pb2.FileSync.UPDATE
                    and not self.isRecoverySyncState
                    and getattr(content, "from") != self.screenName):
                self.onRecievedFileUpdate(content)
            elif content.dataType == fileSyncBuf_pb2.FileSync.UNSUBSCRIBE:
                # leave message
                try:
                    n = self.roster.index(nameAndSession)
                    if name != self.screenName:
                        self.roster.pop(n)
                        print(name + ": Unsubscribe")
                except ValueError:
                    pass
Example #13
def main():
    formatter = '%(asctime)s %(message)s'
    logging.basicConfig(filename='../logs/02_featuretools.log', level=logging.INFO, format=formatter)

    datas = read_all()
    app_train = datas['application_train']
    app_test = datas['application_test']
    bureau = datas['bureau']
    bureau_balance = datas['bureau_balance']
    cash = datas['POS_CASH_balance']
    previous = datas['previous_application']
    installments = datas['installments_payments']
    credit = datas['credit_card_balance']

    app_test["TARGET"] = np.nan
    app = app_train.append(app_test, ignore_index=True, sort=False)

    # Entity set with id applications
    entity_set = ft.EntitySet(id='HomeCredit')

    # Entities with a unique index
    entity_set = entity_set.entity_from_dataframe(entity_id='app', dataframe=app, index='SK_ID_CURR')
    entity_set = entity_set.entity_from_dataframe(entity_id='bureau', dataframe=bureau, index='SK_ID_BUREAU')
    entity_set = entity_set.entity_from_dataframe(entity_id='previous', dataframe=previous, index='SK_ID_PREV')

    # Entities that do not have a unique index
    entity_set = entity_set.entity_from_dataframe(
        entity_id='bureau_balance', dataframe=bureau_balance, make_index=True, index='bureaubalance_index'
    )
    entity_set = entity_set.entity_from_dataframe(
        entity_id='cash', dataframe=cash, make_index=True, index='cash_index'
    )
    entity_set = entity_set.entity_from_dataframe(
        entity_id='installments', dataframe=installments, make_index=True, index='installments_index'
    )
    entity_set = entity_set.entity_from_dataframe(
        entity_id='credit', dataframe=credit, make_index=True, index='credit_index'
    )

    # Add in the defined relationships
    entity_set = entity_set.add_relationships([
        ft.Relationship(entity_set['app']['SK_ID_CURR'],      entity_set['bureau']['SK_ID_CURR']),
        ft.Relationship(entity_set['bureau']['SK_ID_BUREAU'], entity_set['bureau_balance']['SK_ID_BUREAU']),
        ft.Relationship(entity_set['app']['SK_ID_CURR'],      entity_set['previous']['SK_ID_CURR']),
        ft.Relationship(entity_set['previous']['SK_ID_PREV'], entity_set['cash']['SK_ID_PREV']),
        ft.Relationship(entity_set['previous']['SK_ID_PREV'], entity_set['installments']['SK_ID_PREV']),
        ft.Relationship(entity_set['previous']['SK_ID_PREV'], entity_set['credit']['SK_ID_PREV'])
    ])

    agg_primitives = ['sum', 'count', 'min', 'max', 'mean', 'mode']
    feature_matrix, _ = ft.dfs(
        entityset=entity_set, target_entity='app', agg_primitives=agg_primitives, max_depth=2, features_only=False, verbose=True
    )

    feature_matrix = feature_matrix.reset_index()
    dump(feature_matrix, '../data/02_featuretools/feature_matrix.joblib')
Example #14
def process(df_sk_id_curr, item):
    name, df = item
    print(f'--- {name} ---')
    cont = {}
    drop_cols = [column for column in df.columns if column.startswith('SK_ID')]
    for sk_id_curr in tqdm(df_sk_id_curr['SK_ID_CURR']):
        data = df[df['SK_ID_CURR'] == sk_id_curr].sort_values(
            SORT_KEYS[name]).tail(MAX_LEN)
        data = data.drop(drop_cols, axis=1)
        cont[sk_id_curr] = expand(data.values, MAX_LEN)
    dump(cont, f'../data/06_onehot_seq/{name}.joblib')
Example #15
def main():
    all_data = read_all(directory='../data/03_powertransform')
    app_train = all_data.pop('application_train')
    app_test = all_data.pop('application_test')
    data = app_train.append(app_test, sort=False)
    data = pd.get_dummies(data)
    app_train = data.dropna(subset=['TARGET'])
    app_test = data[data['TARGET'].isnull()].drop('TARGET', axis=1)
    dump(app_train, '../data/05_onehot/application_train.joblib')
    dump(app_test, '../data/05_onehot/application_test.joblib')
    with Pool(6) as pool:
        pool.map(process, list(all_data.items()))
Example #16
def looksLikePuzzle( assumedPuzzle ):
    """Returns `True' if any of the hard-coded primes is a factor of a given `n'
    or `False' otherwise."""

    if not (len(util.dump(assumedPuzzle["n"])) == \
            len(util.dump(assumedPuzzle["Ck"])) == \
            (const.PUZZLE_MODULUS_LENGTH / 8)):
        return False

    for prime in primes.primes:
        if (assumedPuzzle["n"] % prime) == 0:
            return False
    return True
Example #17
def looksLikePuzzle(assumedPuzzle):
    """Returns `True' if any of the hard-coded primes is a factor of a given `n'
    or `False' otherwise."""

    if not (len(util.dump(assumedPuzzle["n"])) == \
            len(util.dump(assumedPuzzle["Ck"])) == \
            (const.PUZZLE_MODULUS_LENGTH / 8)):
        return False

    for prime in primes.primes:
        if (assumedPuzzle["n"] % prime) == 0:
            return False
    return True
Example #18
def process(df_sk_id_curr, item):
    name, df = item
    print(f'--- {name} ---')
    cat = {}
    cont = {}
    for sk_id_curr in tqdm(df_sk_id_curr['SK_ID_CURR']):
        data = df[df['SK_ID_CURR'] == sk_id_curr].sort_values(
            SORT_KEYS[name]).tail(MAX_LEN)
        cat[sk_id_curr] = expand(
            data.select_dtypes('category').astype('int').values + 1, MAX_LEN)
        cont[sk_id_curr] = expand(
            data.select_dtypes('float32').values, MAX_LEN)
    dump(cat, f'../data/04_sequence/{name}_cat.joblib')
    dump(cont, f'../data/04_sequence/{name}_cont.joblib')
Example #19
    def onInterest(self, prefix, interest, transport, registeredPrefixId):
        """
        FileSync:
            To be written (TBW)

        """
        util.dump("Got interest packet with name", interest.getName().toUri())
        util.dumpInterest(interest)

        content = fileSyncBuf_pb2.FileSync()
        sequenceNo = int(interest.getName().get(self.fileFolderPrefix.size() +
                                                1).toEscapedString())
        gotContent = False

        #loop through all cached data and find out if you have some new content to respond with
        for i in range(len(self.syncDataCache) - 1, -1, -1):
            data = self.syncDataCache[i]
            if data.sequenceNo == sequenceNo:
                if data.dataType != fileSyncBuf_pb2.FileSync.UPDATE:
                    # Use setattr because "from" is a reserved keyword.
                    setattr(content, "from", self.screenName)
                    content.to = self.fileFolderName
                    content.dataType = data.dataType
                    content.timestamp = int(round(data.time / 1000.0))
                else:
                    setattr(content, "from", self.screenName)
                    content.to = self.fileFolderName
                    content.dataType = data.dataType
                    content.data = data.data
                    content.timestamp = int(round(data.time / 1000.0))
                gotContent = True
                break

        if gotContent:
            logging.info("new content!")
            #Serialize the pklistbuf
            array = content.SerializeToString()
            #Initialize the data with Name
            data = Data(interest.getName())
            #Set content for the data --> the serialized content to bytes
            data.setContent(Blob(array))
            # Sign the data
            self.keyChain.sign(data, self.certificateName)
            try:
                transport.send(data.wireEncode().toBuffer())
            except Exception as ex:
                logging.getLogger(__name__).error(
                    "Error in transport.send: %s", str(ex))
                return
Example #20
    def _set_uuid(self, uuid):
        self.__uuid = None
        uuidfile = os.path.join(self.metapath, 'uuid')
        if os.path.isfile(uuidfile):
            stored_uuid = util.load(uuidfile)
            assert stored_uuid
            if uuid and uuid != stored_uuid:
                raise hgutil.Abort('unable to operate on unrelated repository')
            self.__uuid = uuid or stored_uuid
        elif uuid:
            util.dump(uuid, uuidfile)
            self.__uuid = uuid
        elif not self._skiperror:
            raise hgutil.Abort("hgsubversion metadata unavailable; "
                               "please run 'hg svn rebuildmeta'")
Example #22
def main():
    # Read map from stdin
    (map, start, goal) = parse_map()

    # Dump original map if requested
    if (args.original):
        dump(map, args)

    # Find shortest path
    path = a_star(map, start, goal)
    if path:
        new_map = write_path(map, start, path)
        dump(new_map, args)
        print('Cost:', path_cost(start, path))
    else:
        print('Not reachable.')
Example #23
def ccs811_loop(addr, stop=None):
    def log(*args, **kwargs):
        print('CCS811.{:x}:'.format(addr), *args, **kwargs)

    with util.flock('/tmp/tvoc.{}.lock'.format(hex(addr))):
        status_file = '/tmp/tvoc.{}.txt'.format(hex(addr))
        throttle = util.Throttle(60)
        if addr == 0x5a:
            poster = util.GoogleFormPoster(
                'https://docs.google.com/forms/d/e/1FAIpQLScsxaGES6uXJMzOmJDOpCVJCjaX8EZpAb1HOx6McEIwVqGeFw/viewform?usp=pp_url&entry.806682994=0&entry.1017453344=1&entry.1050656656=2&entry.815754693=3'
            )
        else:
            poster = util.GoogleFormPoster(
                'https://docs.google.com/forms/d/e/1FAIpQLSeOFDSIc_vW59OKUwnwN1jf0D9qm7vZS5ISo0YgSNhd0rwW1A/viewform?usp=pp_url&entry.806682994=0&entry.1017453344=1&entry.1050656656=2&entry.815754693=3'
            )
        baseline_throttle = util.Throttle(24 * 3600)
        baseline_throttle.maybe_run(lambda: None)
        with smbus2.SMBus(1) as bus:
            dev = CCS811(bus, addr)
            assert dev.is_device()
            dev.maybe_start_app()
            dev.maybe_load_baseline('baseline')
            dev.switch_mode(1)
            while stop is None or not stop.is_set():
                try:
                    status = dev.status()
                    if status.error:
                        log(dev.error())
                        util.dump('error', status_file)
                    elif status.data_ready:
                        result = dev.result()
                        util.dump(result.tvoc, status_file)
                        if result.e_co2 <= 8192 and result.tvoc <= 1187:
                            log(
                                result,
                                throttle.maybe_run(lambda: poster.post(
                                    None, [
                                        result.e_co2, result.tvoc, result.raw.
                                        current, result.raw.voltage
                                    ])))
                            baseline_throttle.maybe_run(
                                lambda: dev.save_baseline('baseline'))
                except Exception as e:
                    print(e)
                time.sleep(1)
        print('exit ccs811', hex(addr))
Example #24
def label(args, logger, project, exe):
    inputs = [f for f in os.listdir('afl_output/crashes') if f != 'README.txt']
    logger.info(str(len(inputs)) + " crashes found")
    for f in inputs:
        try:
            crash_cmd = exe + " < afl_output/crashes/" + f
            output = subprocess.Popen(crash_cmd,
                                      shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
            output = output.communicate()[1].decode(encoding="utf-8",
                                                    errors="ignore")
            bug_type = get_afl_bug_type(output)
            (bug_file, bug_line) = get_afl_bug_location(output)
            util.dump(args, project, bug_file, bug_line, bug_type, afl)
        except:
            pass
Example #25
def complex_train_test():

    np.random.seed(0)

    do_images = True

    train_images = load_MNIST.load_MNIST_images('data/train-images-idx3-ubyte')
    dsize = 10000
    patches = train_images[:, :dsize]
    fs = [dsize, 28 * 28, 196, 28 * 28]
    cost, train_op = cost_and_grad(fs=fs,
                                   X0=patches,
                                   lambda_=3e-3,
                                   rho=0.1,
                                   beta=3,
                                   lr=0.1)

    sess = tf.get_default_session()

    u.reset_time()
    old_cost = sess.run(cost)
    old_i = 0
    frame_count = 0
    costs = []
    for i in range(2000):
        cost0, _ = sess.run([cost, train_op])
        costs.append(cost0)
        if i % 100 == 0:
            print(cost0)
            # filters are transposed in visualization
        if ((old_cost - cost0) / old_cost > 0.05
                or i - old_i > 50) and do_images:
            Wf_ = sess.run("Wf_var/read:0")
            W1_ = u.unflatten_np(Wf_, fs[1:])[0]
            display_network.display_network(W1_.T,
                                            filename="pics/weights-%03d.png" %
                                            (frame_count, ))
            frame_count += 1
            old_cost = cost0
            old_i = i
        u.record_time()

    #  u.dump(costs, "costs_adam.csv")
    u.dump(costs, "costs_adam_bn1.csv")
    u.summarize_time()
Example #26
def run_experiment(iters, name):

    #batch_sizes = [1, 10, 100, 1000, 10000, 60000]
    batch_sizes = [100, 200, 300]

    eager_stats = []
    pytorch_stats = []

    def benchmark(f):
        # do whole run once for pre-warming
        f()
        import gc
        gc.collect()
        start_time = time.perf_counter()
        final_loss = f()
        elapsed_time = time.perf_counter() - start_time
        return final_loss, elapsed_time

    for batch_size in batch_sizes:

        def eager_run():
            return eager_lbfgs.benchmark(batch_size=batch_size, iters=iters)

        eager_stats.append(benchmark(eager_run))

        def pytorch_run():
            return pytorch_lbfgs.benchmark(batch_size=batch_size, iters=iters)

        pytorch_stats.append(benchmark(pytorch_run))

    print(eager_stats)
    print(pytorch_stats)
    # pytorch_losses
    # pytorch_times
    # pytorch_sizes

    eager_stats = np.array(eager_stats)
    pytorch_stats = np.array(pytorch_stats)
    u.dump(batch_sizes, name + "_batch.csv")

    u.dump(eager_stats[:, 0], name + "_eager_loss.csv")
    u.dump(eager_stats[:, 1], name + "_eager_time.csv")

    u.dump(pytorch_stats[:, 0], name + "_pytorch_loss.csv")
    u.dump(pytorch_stats[:, 1], name + "_pytorch_time.csv")
Example #27
   def __init__(self, dataset, name, para_corpus=None, filename=None):
      self.name = name
      self.dataset = dataset
      self.filename = 'dat/%s/invs_%s.pkl' % (dataset, name) if not filename else filename

      if not para_corpus:
         self.invs = load(self.filename)
      else:
         self.invs = {}
         num_invs, inv_dists, inv_chunks, num_pairs = self.count(para_corpus)
         self.invs['count'] = num_invs
         self.invs['dist'] = inv_dists
         self.invs['npairs'] = num_pairs
         self.invs['chunks'] = inv_chunks
         dump(self.invs, self.filename)

      self.inv_percent = None
      self.inv_dist = None
Example #28
    def get_word2vec(self, file_path):
        file_w2v = os.path.join(model_dir, "w2v.pkl")
        exists = os.path.isfile(file_w2v)
        if exists:  # load a pre-existing pickle
            word2vec = pickle.load(open(file_w2v, "rb"))
            return word2vec
        else:
            file = open(file_path, "r")
            if (file):
                word2vec = dict()
                split = file.read().splitlines()
                for line in split:
                    key = line.split(' ', 1)[0]  # the first word is the key
                    value = np.array(
                        [float(val) for val in line.split(' ')[1:]])
                    word2vec[key] = value
                util.dump(word2vec, os.path.join(util.model_dir, "w2v.pkl"))
                print("dumped the w2v file")
                return (word2vec)
Example #29
    def solvePuzzle(self, puzzle):
        """Attempts to unlock the given puzzle based on the global constants
        and the semiprime `n'. The locked master key is then returned."""

        assert len(puzzle.items()) == 2

        n, Ck = puzzle["n"], puzzle["Ck"]
        b = pow(gmpy.mpz(self.a), pow(2, self.t), n)
        masterKey = (Ck - b) % n

        return util.dump(masterKey)
Example #30
    def solvePuzzle( self, puzzle ):
        """Attempts to unlock the given puzzle based on the global constants
        and the semiprime `n'. The locked master key is then returned."""

        assert len(puzzle.items()) == 2

        n, Ck = puzzle["n"], puzzle["Ck"]
        b = pow(gmpy.mpz(self.a), pow(2, self.t), n)
        masterKey = (Ck - b) % n

        return util.dump(masterKey)
Example #31
def run_experiment(iters, name):

  #batch_sizes = [1, 10, 100, 1000, 10000, 60000]
  batch_sizes = [100, 200, 300]

  eager_stats = []
  pytorch_stats = []

  def benchmark(f):
    # do whole run once for pre-warming
    f()
    import gc; gc.collect()
    start_time = time.perf_counter()
    final_loss = f()
    elapsed_time = time.perf_counter() - start_time
    return final_loss, elapsed_time

  for batch_size in batch_sizes:
    def eager_run():
      return eager_lbfgs.benchmark(batch_size=batch_size, iters=iters)
    eager_stats.append(benchmark(eager_run))
    def pytorch_run():
      return pytorch_lbfgs.benchmark(batch_size=batch_size, iters=iters)
    pytorch_stats.append(benchmark(pytorch_run))

  print(eager_stats)
  print(pytorch_stats)
  # pytorch_losses
  # pytorch_times
  # pytorch_sizes
  
  eager_stats = np.array(eager_stats)
  pytorch_stats = np.array(pytorch_stats)
  u.dump(batch_sizes, name+"_batch.csv")
  
  u.dump(eager_stats[:,0], name+"_eager_loss.csv")
  u.dump(eager_stats[:,1], name+"_eager_time.csv")
  
  u.dump(pytorch_stats[:,0], name+"_pytorch_loss.csv")
  u.dump(pytorch_stats[:,1], name+"_pytorch_time.csv")
Example #32
def main():
    apis_list = preliminary_task()
    dump("apis_list.json", apis_list)

    responce = call_endpoint(apis_list)
    dump("responce.json", responce)

    instances = extract_instances(responce)
    dump("instances.json", instances)

    flatten(instances)
    dump("pseudo_table_dict.json", pseudo_table_dict)

    insert()
Example #33
def main(rows, cols, obstacle_density):
    assert rows >= 4
    assert cols >= 4
    assert obstacle_density < 1

    # Generate frame
    top = [['X'] * cols]
    middle = [gen_middle_row(cols) for _ in range(1, rows - 1)]
    bottom = [['X'] * cols]
    map = top + middle + bottom

    # Generate obstacles with given density
    n_obstacles = int((rows - 2) * (cols - 2) * obstacle_density)
    for _ in range(0, n_obstacles):
        set_random(map, 'X')

    # Set start
    set_random(map, 'S')

    # Set goal (make sure it's not the same as start)
    set_random_unless(map, 'O', 'S')
    dump(map, args)
Example #34
    def _set_subdir(self, subdir):
        if subdir:
            subdir = '/'.join(p for p in subdir.split('/') if p)

        self.__subdir = None
        subdirfile = os.path.join(self.metapath, 'subdir')

        if os.path.isfile(subdirfile):
            stored_subdir = util.load(subdirfile)
            assert stored_subdir is not None
            if subdir is None:
                self.__subdir = stored_subdir
            elif subdir and subdir != stored_subdir:
                raise hgutil.Abort('unable to work on a different path in the '
                                   'repository')
            else:
                self.__subdir = subdir
        elif subdir is not None:
            util.dump(subdir, subdirfile)
            self.__subdir = subdir
        elif not self._skiperror:
            raise hgutil.Abort("hgsubversion metadata unavailable; "
                               "please run 'hg svn rebuildmeta'")
Example #35
def pm25_loop(stop=None):
    with util.flock('/tmp/pm25.lock'):
        throttle = util.Throttle(60)
        # poster = util.GoogleFormPoster('https://docs.google.com/forms/d/e/1FAIpQLSePbFzMLyEaQVJ9aW-ZRPYsXO8kfm1ay7khmRADiDz0rondYw/viewform?usp=pp_url&entry.1441205787=1970-01-01&entry.1440681565=00:00&entry.1603777044=0&entry.8655809=1&entry.668218130=2&entry.1797950773=3&entry.1371427267=4&entry.1869212283=5&entry.671273885=6&entry.2022906028=7&entry.18109896=8&entry.1464624802=9&entry.2068592484=10&entry.1601194695=11&entry.1795391290=12')
        status_file = '/tmp/pm25.txt'
        while stop is None or not stop.is_set():
            try:
                for i in generate_pms5003_message():
                    when = time.gmtime()
                    ints = parse_pms5003_message(i)
                    util.dump(ints[4], status_file)
                    try:
                        print(
                            when, 'PM25:', ints
                        )  # , throttle.maybe_run(lambda: poster.post(when, ints)))
                    except:
                        pass
                    if stop is not None and stop.is_set():
                        break
            except Exception as e:
                print(e)
                util.dump('error', status_file)
                time.sleep(1)
        print('exit pm25')
Example #36
def generateRawPuzzle( masterKey ):
    """Generates a time-lock puzzle with the given masterKey locked inside.
    Returns the puzzle as 128-byte string which is ready to be sent over the
    wire."""

    assert len(masterKey) == const.MASTER_KEY_SIZE

    riddler = TimeLockPuzzle()
    puzzle = riddler.generatePuzzle(const.MASTER_KEY_PREFIX + masterKey)

    # Convert decimal numbers to raw strings.
    rawPuzzle = bytearray()
    rawPuzzle = [util.dump(x) for x in [puzzle["n"], puzzle["Ck"]]]

    # Return single concatenated string.
    return "".join(rawPuzzle)
Example #37
def generateRawPuzzle(masterKey):
    """Generates a time-lock puzzle with the given masterKey locked inside.
    Returns the puzzle as 128-byte string which is ready to be sent over the
    wire."""

    assert len(masterKey) == const.MASTER_KEY_SIZE

    riddler = TimeLockPuzzle()
    puzzle = riddler.generatePuzzle(const.MASTER_KEY_PREFIX + masterKey)

    # Convert decimal numbers to raw strings.
    rawPuzzle = bytearray()
    rawPuzzle = [util.dump(x) for x in [puzzle["n"], puzzle["Ck"]]]

    # Return single concatenated string.
    return "".join(rawPuzzle)
Example #38
def encryptPuzzle( rawPuzzle ):
    """Encrypts the given `rawPuzzle' with a randomly chosen and small key and
    returns the encrypted puzzle together with the nonce used for AES-CTR."""

    assert len(rawPuzzle) == const.PUZZLE_LENGTH

    log.debug("Encrypting raw %d-byte puzzle." % len(rawPuzzle))

    nonce = mycrypto.strong_random(const.PUZZLE_NONCE_LENGTH)
    cntr = Counter.new(128, initial_value=long(nonce.encode('hex'), 16))
    key = const.MIN_16BYTE_VALUE + \
            random.randint(0, (2 ** const.PUZZLE_OBFUSCATION_KEYSPACE) - 1)
    cipher = AES.new(util.dump(key), AES.MODE_CTR, counter=cntr)

    log.debug("Puzzle key=%x, nonce=%s." % (key, nonce.encode('hex')))

    return cipher.encrypt(rawPuzzle), nonce
Example #39
def encryptPuzzle(rawPuzzle):
    """Encrypts the given `rawPuzzle' with a randomly chosen and small key and
    returns the encrypted puzzle together with the nonce used for AES-CTR."""

    assert len(rawPuzzle) == const.PUZZLE_LENGTH

    log.debug("Encrypting raw %d-byte puzzle." % len(rawPuzzle))

    nonce = mycrypto.strong_random(const.PUZZLE_NONCE_LENGTH)
    cntr = Counter.new(128, initial_value=long(nonce.encode('hex'), 16))
    key = const.MIN_16BYTE_VALUE + \
            random.randint(0, (2 ** const.PUZZLE_OBFUSCATION_KEYSPACE) - 1)
    cipher = AES.new(util.dump(key), AES.MODE_CTR, counter=cntr)

    log.debug("Puzzle key=%x, nonce=%s." % (key, nonce.encode('hex')))

    return cipher.encrypt(rawPuzzle), nonce
Example #40
def bruteForcePuzzle( nonce, encryptedPuzzle, callback ):
    """Try to obtain the original puzzle by brute-forcing `encryptedPuzzle'
    using the given `nonce' for AES-CTR. When the original is found, `callback'
    is called with the locked master key as argument."""

    assert len(nonce) == const.PUZZLE_NONCE_LENGTH
    assert len(encryptedPuzzle) == const.PUZZLE_LENGTH

    # Try to obtain the puzzle by brute-forcing the n-bit key space.
    for key in xrange(2 ** const.PUZZLE_OBFUSCATION_KEYSPACE):

        cntr = Counter.new(128, initial_value=long(nonce.encode('hex'), 16))
        cipher = AES.new(util.dump(const.MIN_16BYTE_VALUE + key), \
                AES.MODE_CTR, counter=cntr)
        assumedPuzzle = extractPuzzle(cipher.decrypt(encryptedPuzzle))

        # FIXME - terminate still running processes if the puzzle was already
        # found.

        if looksLikePuzzle(assumedPuzzle):
            log.debug("Solving puzzle candidate with key=0x100...00%x." % key)
            solvePuzzleInProcess(assumedPuzzle, callback)
Example #41
def bruteForcePuzzle(nonce, encryptedPuzzle, callback):
    """Try to obtain the original puzzle by brute-forcing `encryptedPuzzle'
    using the given `nonce' for AES-CTR. When the original is found, `callback'
    is called with the locked master key as argument."""

    assert len(nonce) == const.PUZZLE_NONCE_LENGTH
    assert len(encryptedPuzzle) == const.PUZZLE_LENGTH

    # Try to obtain the puzzle by brute-forcing the n-bit key space.
    for key in xrange(2**const.PUZZLE_OBFUSCATION_KEYSPACE):

        cntr = Counter.new(128, initial_value=long(nonce.encode('hex'), 16))
        cipher = AES.new(util.dump(const.MIN_16BYTE_VALUE + key), \
                AES.MODE_CTR, counter=cntr)
        assumedPuzzle = extractPuzzle(cipher.decrypt(encryptedPuzzle))

        # FIXME - terminate still running processes if the puzzle was already
        # found.

        if looksLikePuzzle(assumedPuzzle):
            log.debug("Solving puzzle candidate with key=0x100...00%x." % key)
            solvePuzzleInProcess(assumedPuzzle, callback)
Example #42
    def Receive (self):
        answer = bytearray ()
        recvcount = 0
        while True:
            c = self.ser.read ()
            if len (c) == 0:
                if (self.debug & 4) and (len (answer) != 0):
                    dump ("Incomplete packet", answer)
                return None

            answer.append (c)
            if len (answer) > MB_HEADER_LEN:
                dl = (answer [2] & MB_LEN_MASK) + 1
                if len (answer) >= MB_HEADER_LEN + dl + 1:
                    # The packet arrived in full. Verify the CRC8.
                    if (CRC8 (answer, 0, MB_HEADER_LEN + dl + 1) != 0):
                        if (self.debug & 4):
                            dump ("Bad packet CRC8", answer)
                        return None

                    if self.debug & 1:
                        dump ("Received", answer)

                    return answer
Example #43
def svc_1():
    """
    Submission: svc_1_0620_01.csv
    E_val: 0.866856950449
    E_in: 0.855948
    E_out: 0.8546898189645258
    """
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import LinearSVC
    from sklearn.cross_validation import StratifiedKFold
    from sklearn.feature_selection import RFE
    from sklearn.grid_search import RandomizedSearchCV
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.linear_model import LogisticRegression
    from scipy.stats import expon

    logger.debug('svc_1')

    X = util.fetch(util.cache_path('train_X_before_2014-08-01_22-00-47'))
    y = util.fetch(util.cache_path('train_y_before_2014-08-01_22-00-47'))

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    rfe = RFE(estimator=LogisticRegression(class_weight='auto'), step=1,
              n_features_to_select=21)
    rfe.fit(X_scaled, y)
    util.dump(rfe, util.cache_path('feature_selection.RFE.21'))

    X_pruned = rfe.transform(X_scaled)

    logger.debug('Features selected.')

    new_scaler = StandardScaler()
    new_scaler.fit(X_pruned)
    X_new = new_scaler.transform(X_pruned)

    svc = LinearSVC(dual=False, class_weight='auto')
    rs = RandomizedSearchCV(svc, n_iter=50, scoring='roc_auc', n_jobs=-1,
                            cv=StratifiedKFold(y, 5),
                            param_distributions={'C': expon()})
    rs.fit(X_new, y)

    logger.debug('Got best SVC.')
    logger.debug('Grid scores: %s', rs.grid_scores_)
    logger.debug('Best score (E_val): %s', rs.best_score_)
    logger.debug('Best params: %s', rs.best_params_)

    svc = rs.best_estimator_
    util.dump(svc, util.cache_path('new_data.SVC'))

    isotonic = CalibratedClassifierCV(svc, cv=StratifiedKFold(y, 5),
                                      method='isotonic')
    isotonic.fit(X_new, y)
    util.dump(isotonic,
              util.cache_path('new_data.CalibratedClassifierCV.isotonic'))

    logger.debug('Got best isotonic CalibratedClassifier.')
    logger.debug('E_in (isotonic): %f', auc_score(isotonic, X_new, y))

    to_submission(Pipeline([('scale_raw', raw_scaler),
                            ('rfe', rfe),
                            ('scale_new', new_scaler),
                            ('svc', isotonic)]), 'svc_1_0620_01')
Example #44
    def save(self):
        '''Save the Subversion metadata. This should really be called after
        every revision is created.
        '''
        util.dump(self.branches, self.branch_info_file)
Example #45
    def Transmit (self, cmd):
        if self.debug & 2:
            dump ("Sending", cmd)
        self.ser.flushInput ()
        self.ser.write (str (cmd))
Example #46
        revpath, revision = convinfo[40:].split('@')
        # use tmp variable for testing
        subdir = meta.subdir
        if subdir and subdir[0] != '/':
            subdir = '/' + subdir
        if subdir and subdir[-1] == '/':
            subdir = subdir[:-1]
        assert revpath.startswith(subdir), ('That does not look like the '
                                            'right location in the repo.')

        if layout is None:
            layout = layouts.detect.layout_from_commit(subdir, revpath,
                                                       ctx.branch(), meta)
            existing_layout = layouts.detect.layout_from_file(meta)
            if layout != existing_layout:
                util.dump(layout, meta.layout_file)
            layoutobj = layouts.layout_from_name(layout, meta)
        elif layout == 'single':
            assert (subdir or '/') == revpath, ('Possible layout detection'
                                                ' defect in replay')

        # write repository uuid if required
        if meta.uuid is None or validateuuid:
            validateuuid = False
            uuid = convinfo[4:40]
            if not skipuuid:
                if svn is None:
                    svn = svnrepo.svnremoterepo(ui, url).svn
                if uuid != svn.uuid:
                    raise hgutil.Abort('remote svn repository identifier '
                                       'does not match')
Example #47
    def _set_cachedconfig(self, value, name, filename):
        varname = '_' + name
        f = os.path.join(self.metapath, filename)
        setattr(self, varname, value)
        util.dump(value, f)
Example #48
def main():
    args = parse_args()
    seed_everything(args.seed)
    app_train = joblib.load(
        '../data/03_powertransform/application_train.joblib')
    app_test = joblib.load('../data/03_powertransform/application_test.joblib')
    sequences = read_sequences('../data/04_sequence/')
    dims = joblib.load('../data/07_dims/dims03.joblib')
    dims.pop('application_train')
    dims.pop('application_test')

    for name, diminfo in dims.items():
        cat = sequences[f'{name}_cat']
        cont = sequences[f'{name}_cont']
        train_loader = torch.utils.data.DataLoader(
            SequenceDataset(app_train, cat, cont),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=6,
            worker_init_fn=worker_init_fn)
        test_loader = torch.utils.data.DataLoader(
            SequenceDataset(app_test, cat, cont),
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=6,
            worker_init_fn=worker_init_fn)
        model = DIMLSTMModule(diminfo, args.n_hidden, train_loader,
                              test_loader, args)
        logdir = '../logs/21_dimlstm'
        path = pathlib.Path(logdir) / name
        if not path.exists():
            path.mkdir(parents=True)
        logger = TensorBoardLogger(logdir, name=name)
        early_stopping = EarlyStopping(patience=args.patience,
                                       monitor='val_loss_main',
                                       mode='min')
        filepath = pathlib.Path(
            logdir) / name / f'version_{logger.version}' / 'checkpoints'
        model_checkpoint = ModelCheckpoint(str(filepath),
                                           monitor='val_loss_main',
                                           mode='min')
        trainer = pl.Trainer(default_save_path=logdir,
                             gpus=-1,
                             max_epochs=args.n_epochs,
                             early_stop_callback=early_stopping,
                             logger=logger,
                             row_log_interval=100,
                             checkpoint_callback=model_checkpoint)
        trainer.fit(model)

        best_model = load_model(model,
                                name,
                                trainer.logger.version,
                                logdir=logdir)
        train_loader_no_shuffle = torch.utils.data.DataLoader(
            SequenceDataset(app_train, cat, cont),
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=6,
            worker_init_fn=worker_init_fn)
        df_train = predict(name, best_model, train_loader_no_shuffle)
        df_test = predict(name, best_model, test_loader)
        df_encoding = pd.concat([df_train, df_test])
        dump(df_encoding, f'../data/21_dimlstm/{name}.joblib')
Example #49
    optimizer = AdamWithWeightnorm()

    model = Sequential()
    model.add(Dense(1024, input_dim=28 * 28, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(196, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(28 * 28, activation='relu'))
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=[metrics.mean_squared_error])
    # nb_epochs in older version
    cb = TestCallback((X_train, X_train), (X_test, X_test), prefix)

    start_time = time.time()
    result = model.fit(X_train,
                       X_train,
                       validation_data=(X_test, X_test),
                       batch_size=args.batch_size,
                       nb_epoch=epochs,
                       callbacks=[cb])

    acc_hist = np.asarray(result.history['mean_squared_error']
                          ) * 28 * 28  # avg pixel loss -> avg image loss
    u.dump(acc_hist, "%s_losses.csv" % (prefix, ))
    u.dump(cb.losses, "%s_vlosses.csv" % (prefix, ))
Example #50
def load_train(earlist_base_date=None, depth=1, cache_only=False):
    """
    Load dataset for training and validating.

    *NOTE*  If you need a validating set, you SHOULD split from training set
    by yourself.

    Parameters
    ----------
    earlist_base_date: datetime, None by default
    Base date won't be smaller than earlist_base_date.

    depth: int, 1 by default
    Maximum moves of time window.

    cache_only: bool, False by default
    Cache data of every period, do not return full spanned data.

    Returns
    -------
    X: numpy ndarray, shape: (num_of_enrollments, num_of_features)
    Rows of features. It is the features of all time if cache_only is True.

    y: numpy ndarray, shape: (num_of_enrollments,)
    Vector of labels. It is the labels of all time if cache_only is True.
    """
    logger = logging.getLogger('load_train')

    enroll_ids = np.sort(util.load_enrollment_train()['enrollment_id'])
    log = util.load_logs()[['enrollment_id', 'time']]
    # base_date = log['time'].max().to_datetime()
    base_date = datetime(2014, 8, 1, 22, 0, 47)

    logger.debug('load features before %s', base_date)

    pkl_X_path = util.cache_path('train_X_before_%s' %
                                 base_date.strftime('%Y-%m-%d_%H-%M-%S'))
    pkl_y_path = util.cache_path('train_y_before_%s' %
                                 base_date.strftime('%Y-%m-%d_%H-%M-%S'))
    if os.path.exists(pkl_X_path) and os.path.exists(pkl_y_path):
        logger.debug('fetch cached')
        X = util.fetch(pkl_X_path)
        y = util.fetch(pkl_y_path)
    else:
        X, _ = __load_dataset__(enroll_ids, log, base_date)
        y_with_id = util.load_val_y()
        if not np.all(y_with_id[:, 0] == enroll_ids):
            logger.fatal('something wrong with enroll_ids')
            raise RuntimeError('something wrong with enroll_ids')
        y = y_with_id[:, 1]

        util.dump(X, pkl_X_path)
        util.dump(y, pkl_y_path)

    # base_date = log['time'].max().to_datetime() - timedelta(days=10)
    base_date = datetime(2014, 7, 22, 22, 0, 47)
    Dw = timedelta(days=7)
    enroll_ids = __enroll_ids_with_log__(enroll_ids, log, base_date)
    for _ in range(depth - 1):
        if enroll_ids.size <= 0:
            break
        if earlist_base_date is not None and base_date < earlist_base_date:
            break

        logger.debug('load features before %s', base_date)

        # get instances and labels
        pkl_X_path = util.cache_path('train_X_before_%s' %
                                     base_date.strftime('%Y-%m-%d_%H-%M-%S'))
        pkl_y_path = util.cache_path('train_y_before_%s' %
                                     base_date.strftime('%Y-%m-%d_%H-%M-%S'))
        if os.path.exists(pkl_X_path) and os.path.exists(pkl_y_path):
            logger.debug('fetch cached')
            X_temp = util.fetch(pkl_X_path)
            y_temp = util.fetch(pkl_y_path)
        else:
            X_temp, y_temp = __load_dataset__(enroll_ids, log, base_date)

            util.dump(X_temp, pkl_X_path)
            util.dump(y_temp, pkl_y_path)

        # update instances and labels
        if not cache_only:
            X = np.r_[X, X_temp]
            y = np.append(y, y_temp)

        # update base_date and enroll_ids
        base_date -= Dw
        enroll_ids = __enroll_ids_with_log__(enroll_ids, log, base_date)

    return X, y
Example #51
                parentbranch = parent.branch()
                if parentbranch == 'default':
                    parentbranch = None
            else:
                parentbranch = None
            # branchinfo is a map from mercurial branch to a
            # (svn branch, svn parent revision, svn revision) tuple
            parentrev = util.getsvnrev(parent, '@').split('@')[1] or 0
            branchinfo[branch] = (parentbranch,
                                  int(parentrev),
                                  revision)

    ui.progress('rebuild', None, total=numrevs)

    # save off branch info
    util.dump(branchinfo, meta.branch_info_file)


def help_(ui, args=None, **opts):
    """show help for a given subcommands or a help overview
    """
    if args:
        subcommand = args[0]
        if subcommand not in table:
            candidates = []
            for c in table:
                if c.startswith(subcommand):
                    candidates.append(c)
            if len(candidates) == 1:
                subcommand = candidates[0]
            elif len(candidates) > 1:
Example #52
    m = Alignment(p.prototxt, p.model, p.layername, p.mean_file, p.W, p.t,
                  p.mean_shape)
    filenames = get_filenames(p.filelists)
    if p.method == 'all':
        shapes_max = []
        shapes_max_pca = []
        shapes_mean = []
        shapes_mean_pca = []
        for filename in filenames:
            print filename
            img = caffe.io.load_image(p.root + filename)
            shapes = m.process_all(img)
            shapes_max.append(shapes[0])
            shapes_max_pca.append(shapes[1])
            shapes_mean.append(shapes[2])
            shapes_mean_pca.append(shapes[3])
        dump(np.array(shapes_max), filenames, p.outpath + '_max')
        dump(np.array(shapes_max_pca), filenames, p.outpath + '_max_pca')
        dump(np.array(shapes_mean), filenames, p.outpath + '_mean')
        dump(np.array(shapes_mean_pca), filenames, p.outpath + '_mean_pca')
    else:
        shapes = []
        for filename in filenames:
            print filename
            img = caffe.io.load_image(p.root + filename)
            shape = m.process(img, p.method)
            shapes.append(shape)
        shapes = np.array(shapes)
        dump(shapes, filenames, p.outpath)
    print "Process finished!"
Example #53
    expected_slope = -grad2_norm_op.eval()

    # ratio of best possible slope to actual slope
    # don't divide by actual slope because that can be 0
    slope_ratio = abs(actual_slope)/abs(expected_slope)
    costs.append(cost0)
    step_lengths.append(lr0)
    ratios.append(slope_ratio)

    if i%10 == 0:
      print("Learning rate: %f"% (lr0,))
      print("Cost %.2f, expected decrease %.2f, actual decrease, %.2f ratio %.2f"%(cost0, expected_delta, actual_delta, slope_ratio))

    # don't shrink learning rate once results are very close to minimum
    if slope_ratio < alpha and abs(target_delta)>1e-6:
      print("%.2f %.2f %.2f"%(cost0, cost1, slope_ratio))
      print("Slope optimality %.2f, shrinking learning rate to %.2f"%(slope_ratio, lr0*beta,))
      sess.run(lr_set, feed_dict={lr_p: lr0*beta})
    else:
      # see if our learning rate got too conservative, and increase it
      if i>0 and i%10 == 0 and slope_ratio>0.99:
        print("%.2f %.2f %.2f"%(cost0, cost1, slope_ratio))
        print("Growing learning rate to %.2f"%(lr0*growth_rate))
        sess.run(lr_set, feed_dict={lr_p: lr0*growth_rate})

    u.record_time()

  u.dump(step_lengths, "step_lengths_ada.csv")
#  u.dump(costs, "costs_ada.csv")
#  u.dump(ratios, "ratios_ada.csv")
Example #54
  def handle_read(self):
    data = self.recv(1024)
    if data == '':
      self.handle_close()
      return
    log.l.LogIt('RTC006', 'D', 'cmd i: %s', (data))
    args = string.split(data)
    if len(args) == 0:
      return;
    self.mode = args[0]
    self.target = args[1]
    args = args[2:]
    log.l.LogIt('RTC007', '1', '%s', (str(args)))
    if len(args) == 0:
      return

    #--------------------
    if args[0] == 'close':
      self.senddata.append(util.close(self.mode == 'router', self.target, args[1:]))

    #--------------------
    elif args[0] == 'data':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        self.senddata.append(util.data(args[1:]))

    elif args[0] == 'dump':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        self.senddata.append(util.dump())

    #--------------------
    elif args[0] == 'event':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        self.senddata.append(util.event(args[1:]))

    #--------------------
    elif args[0] == 'fb' or args[0] == 'fallback' or args[0] == 'secondary':
      if len(args) > 1 and args[1] == 'auto':
        auto = 1
      else:
        auto = 0
      if self.mode == 'domain':
        cfg.domain[self.target]['fallback'] = 1
        self.senddata.append(util.switch_secondary(self.target, auto))
      if self.mode == 'router':
        for i in cfg.domain.keys():
          if cfg.domain[i]['apr'] == cfg.name:
            cfg.domain[i]['fallback'] = 1
            self.senddata.append(i+' '+util.switch_secondary(i, auto))

    #--------------------
    elif args[0] == 'ff' or args[0] == 'fallforward' or args[0] == 'primary':
      if len(args) > 1 and args[1] == 'auto':
        auto = 1
      else:
        auto = 0
      if self.mode == 'domain':
        cfg.domain[self.target]['fallback'] = 0
        self.senddata.append(util.switch_primary(self.target, auto))
      if self.mode == 'router':
        for i in cfg.domain.keys():
          if cfg.domain[i]['apr'] == cfg.name:
            cfg.domain[i]['fallback'] = 0
            self.senddata.append(i+' '+util.switch_primary(i, auto))

    #--------------------
    elif args[0] == 'pvc':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        self.senddata.append(util.pvc(args[1:]))

    #--------------------
    elif args[0] == 'refresh':
      evt_hdlr.refresh()
      self.senddata.append('status refreshed')

    #--------------------
    elif args[0] == 'set':
      self.senddata = util.set(self.mode == 'router', self.target, args[1:])

    #--------------------
    elif args[0] == 'sna':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        self.senddata.append(util.sna(args[1:]))

    #--------------------
    elif args[0] == 'status':
      if self.mode == 'domain':
        self.senddata.append(util.status(self.target))
      if self.mode == 'router':
        for i in cfg.domain.keys():
          if cfg.domain[i]['apr'] == cfg.name:
            self.senddata.append(i+' '+util.status(i))
      if len(self.senddata) == 0:
        self.senddata.append('not active')

    #--------------------
    elif args[0] == 'stop':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        log.l.LogIt('RTC008', 'I', 'command termination', ())
        cfg.stopping = 1
        msg = '%s terminating' % self.target
        self.senddata.append(msg)
        for i in cfg.domain.keys():
          util.closeall(i)

    #--------------------
    elif args[0] == 'trace':
      if self.mode == 'domain':
        self.senddata.append('command: '+args[0]+': address only to router')
      if self.mode == 'router':
        if len(args) > 1:
          log.l.SetTraceLevel(int(args[1]))
          self.senddata.append('trace level %s' % (args[1]))
          log.l.LogIt('RTC009', 'I', 'command trace %s', (args[1]))
        else:
          level = log.l.GetTraceLevel()
          self.senddata.append('trace level %d' % (level))
          log.l.LogIt('RTC010', 'I', 'command get trace: %d', (level))

    #--------------------
    elif args[0] == 'version':
      msg = ver.getVersion()
      if cfg.snasrv_version != '':
        msg = msg + ' snasrv: ' + cfg.snasrv_version
      self.senddata.append(msg)

    #--------------------
    else:
      self.senddata.append('command: '+args[0]+': not implemented')