Esempio n. 1
0
def diff_string(s1, s2):
    """Return the unix `diff` output between two multi-line strings."""
    left, right = '/tmp/diff1', '/tmp/diff2'
    # Stage both strings as line lists in temp files for the external tool.
    for tmp_path, text in ((left, s1), (right, s2)):
        write_file(tmp_path, text.split('\n'))
    # A non-empty result means the strings differ.
    return Popen(['diff', left, right], stdout=PIPE).stdout.read()
Esempio n. 2
0
    def having_file(self, contents):
        """Create the next numbered .trashinfo file with *contents* and remember its path."""
        fields = {'info_dir': self.path, 'name': str(self.number)}
        path = '%(info_dir)s/%(name)s.trashinfo' % fields
        write_file(path, contents)
        # Advance the counter and keep the last-written path for assertions.
        self.number += 1
        self.path_of_last_file_added = path
Esempio n. 3
0
def diff_string(s1, s2):
    """Diff two strings with the external `diff` tool; return its raw stdout."""
    paths = ('/tmp/diff1', '/tmp/diff2')
    write_file(paths[0], s1.split('\n'))
    write_file(paths[1], s2.split('\n'))
    process = Popen(['diff', paths[0], paths[1]], stdout=PIPE)
    return process.stdout.read()
Esempio n. 4
0
def write_store_file(a_storefile, filedata):
    ''' Ensures the store exists and writes the image data

    Raises IOError if file could not be written or directory could not be created
    '''
    # NOTE(review): [:-1] only strips the LAST CHARACTER of the path, not the
    # filename component — this only yields the parent directory if the path
    # ends in a separator. Confirm os.path.dirname() wasn't intended here.
    files.ensure_dir(a_storefile[:-1])
    files.write_file(a_storefile, filedata)
Esempio n. 5
0
def compile_gpp(code, temp_dir):
    """Compile C++ source file and return path to binary.

    Writes *code* into a uniquely named run directory under
    ``temp_dir/psim`` and compiles it with g++.

    Raises:
        Exception: if g++ exits with a non-zero status.
    """
    # Unique run directory: UTC timestamp prefix plus 6 random digits.
    datetime_str = datetime.utcnow().strftime(r"run-%Y%m%d-%H%M-")
    random_str = "".join(random.sample("0123456789", 6))

    sim_dir = os.path.join(temp_dir, "psim", datetime_str + random_str)

    source_file = os.path.join(sim_dir, 'psim.cpp')
    output_file = os.path.join(sim_dir, 'psim.exe')

    # Fixed: build the command as an argument list run without a shell.
    # The old interpolated shell string broke on paths containing spaces
    # or shell metacharacters (and was injection-prone).
    gpp_cmd = ["g++", "-std=c++11", "-fdiagnostics-color=always",
               "-o", output_file, source_file]

    # Create simulation directory (plus parents, if necessary).
    os.makedirs(sim_dir, exist_ok=True)

    # Write source file and compile.
    write_file(source_file, code)
    exit_code = call(gpp_cmd)

    if exit_code != 0:
        raise Exception("Compilation failed (%s)" % source_file)

    # Return path to compiled binary.
    return output_file
Esempio n. 6
0
def test():
    """Register the user unless the username is taken, then redirect home."""
    username = request.args.get('username')
    password = request.args.get('password')
    # Only create a record for unknown usernames; both paths redirect home.
    # NOTE(review): password appears to be stored as-is — confirm hashing
    # happens inside files.write_file.
    if username not in files.get_file().keys():
        files.write_file(username, password)
    return redirect('/')
Esempio n. 7
0
    def having_file(self, contents):
        """Write *contents* to the next numbered .trashinfo file, creating its parent dir."""
        fields = {'info_dir': self.path, 'name': str(self.number)}
        path = '%(info_dir)s/%(name)s.trashinfo' % fields
        make_parent_for(path)
        write_file(path, contents)
        # Track progress so the next call gets a fresh number.
        self.number += 1
        self.path_of_last_file_added = path
Esempio n. 8
0
def write_input(ifile, xcc, f="%+13.9f"):
    """Format the cartesian coordinates in *xcc*, one atom per line, and write them to *ifile*."""
    lines = []
    for atom in range(fncs.howmanyatoms(xcc)):
        coords = (f % fncs.x(xcc, atom),
                  f % fncs.y(xcc, atom),
                  f % fncs.z(xcc, atom))
        lines.append("  %s %s %s \n" % coords)
    # Join once instead of accumulating with +=.
    ff.write_file(ifile, "".join(lines))
Esempio n. 9
0
def test_post_crud():
    """Exercise the post CRUD helpers end to end, then round-trip a text file."""

    # * CSV file post 'Rattlesnakes, I hate snakes'
    open_CSV()

    # * Print post list
    print_post()

    # * Add post 'Kittens, Kittens are Fuzzy'
    add_posts()
    # Fixed: removed the syntactically invalid "write.('Kittens, ...')" line.
    write_post('Kittens, Kittens are Fuzzy')

    # * Add user_id of 4 to posts
    add_users()

    # * Print posts showing user names
    print_posts_users()

    # * Remove post
    remove_post()

    # * Remove user
    remove_users()

    # Round-trip a small text file and check it reads back unchanged.
    text = "line1\nline2"
    path = 'test.txt'
    write_file(path, text)
    t = read_file(path)
    print('text:'+text+'$')
    print('t:'+t+'$')
    # Fixed: dropped the contradictory "assert(t!=text)" that guaranteed
    # failure immediately after asserting equality.
    assert t == text
Esempio n. 10
0
    def receive(self, mail_message):
        """Handle an inbound email: log it, persist the raw message, queue a task."""
        # One stored blob per message, keyed by a fresh UUID.
        filename = 'inmail/' + str(uuid.uuid4())
        logging.info('INMAIL_TEST handler')
        logging.info("Received a message from: %s, to: %s" % (mail_message.sender, mail_message.to))
        logging.info("mail date: %s" % mail_message.date)
        logging.info("subject: %s" % mail_message.subject)
        logging.info("filename %s" % filename)
        # Keep the complete original message, including the Unix "From " envelope line.
        raw = mail_message.original.as_string(unixfrom=True)
        files.write_file(filename, raw)
        # Hand the stored message off to the incoming-email task handler.
        taskqueue.add(url='/task/incoming_email/', params={'filename': filename})
Esempio n. 11
0
def gclogin(username, password):
    """ login in to geocaching.com

    Returns [loginok, response]: loginok is 1 on success, and response is the
    page text from either the session check or the authentication request.
    """
    loginok, response = is_logged_in()

    if loginok == 0:
        data = do_auth(username, password)
        response = data.text
        # The logged-in page contains the "li-user-info" element.
        if "li-user-info" in response:
            # Fixed: this line had been redacted into invalid syntax
            # ("username="******"...); rebuild the ini payload from the
            # actual arguments.
            # NOTE(review): credentials are written in plain text — confirm
            # this is acceptable for this application.
            data = "username=" + username + "\npassword=" + password + "\n"
            files.write_file("geocaching.ini", data)
            loginok = 1
            files.save_session(SESSION)
    else:
        loginok = 1

    return [loginok, response]
Esempio n. 12
0
def remove_negative_diffs(house_or_times, channel_or_data, path=PATH, input_ftype=INPUT_FTYPE, output_ftype=OUTPUT_FTYPE):
    """
    Removes negative time differences within the data

    Sorts the samples by timestamp so all consecutive time differences are
    non-negative. Reads from / writes to channel files when the respective
    ftype is given; otherwise operates on in-memory arrays and returns them.
    """
    if input_ftype:
        # Arguments are (house, channel): load the channel file.
        house, channel = house_or_times, channel_or_data
        times, data = read_file(path + f"/house_{house}/channel_{channel}.{input_ftype}", input_ftype)
    else:
        # Arguments are the raw (times, data) arrays themselves.
        house = channel = "undefined"
        times, data = house_or_times, channel_or_data

    order = times.argsort()
    sorted_times = times[order]
    sorted_data = data[order]

    if output_ftype:
        write_file(path + f"/house_{house}/channel_{channel}.{output_ftype}", sorted_times, sorted_data, output_ftype)
    else:
        return sorted_times, sorted_data
Esempio n. 13
0
def create_data_file(user_num, user_record_num):
    """Generate user_num * user_record_num random action records and write them out."""
    # Fall back to 10 users / 10 records when missing or non-positive.
    if user_num is None or user_num <= 0:
        user_num = 10
    if user_record_num is None or user_record_num <= 0:
        user_record_num = 10

    data = []
    for user_id in range(1, user_num + 1):
        for _ in range(user_record_num):
            fields = [
                str(user_id),
                str(randint(1, 7)),      # action_id
                str(randint(1, 100)),    # item_id
                str(time.time())[:-3],   # time_line_id (truncated timestamp)
            ]
            data.append('-'.join(fields) + '\n')

    write_file(env.TEST_DATA_FILE_PATH, data)
    return True
Esempio n. 14
0
def test_article_crud():
    """Round-trip a small text file; article CRUD steps remain as TODOs below."""

    # * CSV file Article 'Rattlesnakes, I hate snakes'
    # * Print Article list
    # * Add Article 'Kittens, Kittens are Fuzzy'
    # * Add author_id of 4 to Articles
    # * Print Articles showing Author names
    # * Select articles for Author 4
    # * Lookup '4, Kittens'
    # * Change 'Kittens' body to 'Kittens are cute!'
    # * Remove Article
    # * Remove Author

    text = "line1\nline2"
    path = 'test.txt'
    write_file(path, text)
    t = read_file(path)
    print('text:' + text + '$')
    print('t:' + t + '$')
    # Fixed: removed the contradictory "assert (t != text)" that made this
    # test always fail right after asserting equality.
    assert t == text
    def test_should_list_multiple_trashinfo(self):
        """Listing should return every .trashinfo file, in any order."""
        write_file("sandbox/info/foo.trashinfo")
        write_file("sandbox/info/bar.trashinfo")
        write_file("sandbox/info/baz.trashinfo")

        result = self.list_trashinfos()

        expected = ["sandbox/info/foo.trashinfo",
                    "sandbox/info/baz.trashinfo",
                    "sandbox/info/bar.trashinfo"]
        assert_items_equal(expected, result)
    def test_should_list_multiple_trashinfo(self):
        """All three created trashinfo files must appear in the listing."""
        write_file('sandbox/info/foo.trashinfo')
        write_file('sandbox/info/bar.trashinfo')
        write_file('sandbox/info/baz.trashinfo')

        listed = self.list_trashinfos()

        # Order-insensitive comparison: the lister makes no ordering promise.
        assert_items_equal(['sandbox/info/foo.trashinfo',
                            'sandbox/info/baz.trashinfo',
                            'sandbox/info/bar.trashinfo'], listed)
Esempio n. 17
0
def search(files, include, exclude=None):
    """Stage newline-separated file/pattern lists in /tmp and run the match command."""
    write_file("/tmp/xfiles", files.split('\n'))
    write_file("/tmp/xinclude", include.split('\n'))
    # Guard clause: without an exclude list, run the two-argument form.
    if not exclude:
        return do_command('match /tmp/xfiles /tmp/xinclude')
    write_file("/tmp/xexclude", exclude.split('\n'))
    return do_command('match /tmp/xfiles /tmp/xinclude /tmp/xexclude')
Esempio n. 18
0
def search(files, include, exclude=None):
    """Run the external 'match' command over newline-separated file/pattern lists."""
    staged = {"/tmp/xfiles": files, "/tmp/xinclude": include}
    if exclude:
        staged["/tmp/xexclude"] = exclude
    # Insertion order keeps the original write sequence (files, include, exclude).
    for tmp_path, text in staged.items():
        write_file(tmp_path, text.split('\n'))
    if exclude:
        return do_command('match /tmp/xfiles /tmp/xinclude /tmp/xexclude')
    return do_command('match /tmp/xfiles /tmp/xinclude')
Esempio n. 19
0
def put_domain_doc(doc):
    """Store the incoming input at the path mapped for *doc*."""
    target = map_doc_path(doc)
    write_file(target, read_input())
Esempio n. 20
0
def interpolate_channel(house_or_times, channel_or_data, threshold=THRESHOLD, path=PATH, input_ftype=INPUT_FTYPE, output_ftype=OUTPUT_FTYPE):
    """
    Goes over all the data in a given channel, interpolates and splits it

    Gaps of at most *threshold* seconds are filled by interpolation; larger
    gaps split the channel into separate (times, data) segments. When
    *output_ftype* is set, the segments are written into the channel's
    ``_preprocessed`` directory; otherwise the list of segments is returned.
    """

    if input_ftype:
        house, channel = house_or_times, channel_or_data
        times, data = read_file(path + f"/house_{house}/channel_{channel}.{input_ftype}", input_ftype)
    else:
        house = channel = "undefined"
        times, data = house_or_times, channel_or_data

    times, data = remove_negative_diffs(times, data, input_ftype=None, output_ftype=None)

    # Calculate the differences between the consecutive timestamps
    diffs = np.diff(times)
    # Discard those with difference 1, and add the index in the original array
    # Fixed: np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    l1 = np.array([diffs, np.arange(len(diffs))]).transpose()[(diffs - 1).astype(bool)]

    arrays = []
    current_times = np.empty(times[-1] - times[0], dtype = np.int32)
    current_data  = np.empty(times[-1] - times[0], dtype = np.float32)
    array_index   = 0
    last_i = -1
    # Fixed: initialise i so the tail copy below still works when l1 is
    # empty (previously a NameError when all consecutive diffs were 1).
    i = -1

    for diff, i in l1:
        if diff == 0:
            # Duplicate timestamp: copy up to and including the previous index.
            current_times[array_index : array_index + i - last_i] = times[last_i : i]
            current_data [array_index : array_index + i - last_i] = data [last_i : i]

            array_index += i - last_i

            last_i = i
            continue

        # Add the elements up to the current index to the current arrays
        current_times[array_index : array_index + i - last_i] = times[last_i + 1 : i + 1]
        current_data [array_index : array_index + i - last_i] = data [last_i + 1 : i + 1]

        array_index += i - last_i

        # If the time jump is less than the set threshold, interpolate between them
        if diff <= threshold:
            new_times, new_data = interpolate(data, times, diff, i)

            current_times[array_index : array_index + diff - 1] = new_times
            current_data [array_index : array_index + diff - 1] = new_data

            array_index += diff - 1

        # Else, start new arrays and save the current ones
        else:
            current_times.resize((array_index,))
            current_data .resize((array_index,))

            arrays.append((np.copy(current_times), np.copy(current_data)))

            current_times = np.empty(times[-1] - times[i], dtype = np.int32)
            current_data  = np.empty(times[-1] - times[i], dtype = np.float32)

            array_index = 0

        last_i = i

    # Add the final elements to the current array
    to_go = len(times) - i - 1
    current_times[array_index : array_index + to_go] = times[i + 1:]
    current_data [array_index : array_index + to_go] = data [i + 1:]

    array_index += to_go

    # Deallocate the unnecessarily allocated space
    current_times.resize((array_index,))
    current_data .resize((array_index,))

    # Save it as well
    arrays.append((current_times, current_data))

    if output_ftype:
        # Write all of the data to new files
        for file_num, (times_array, data_array) in enumerate(arrays):

            # Make the channel_?_preprocessed directory if it doesn't exist
            # Fixed: use the *path* parameter (was the module constant PATH).
            if not os.path.exists(path + f"/house_{house}/channel_{channel}_preprocessed"):
                os.mkdir(path + f"/house_{house}/channel_{channel}_preprocessed")

            # Fixed: name files by segment number (was file_{output_ftype},
            # which made every segment overwrite the same file; the parallel
            # match_times_channel writer uses file_{file_num}).
            write_file(path + f"/house_{house}/channel_{channel}_preprocessed/file_{file_num}.npy", times_array, data_array, output_ftype)
    else:
        return arrays
Esempio n. 21
0
def test_user_bill():
    """The first field of the first user row should match the name that was written."""
    write_file('user.csv', 'Sammy, [email protected]')
    users = user_list()
    # Fixed: the file writes 'Sammy', so that is the expected first name —
    # the old expectation of 'Bill' could never pass.
    assert users[0][0] == 'Sammy'
Esempio n. 22
0
 def add_trashinfo_for(self, index, path):
     """Write a trashinfo entry for *index* pointing at original *path*."""
     contents = a_trashinfo_with_path(path)
     write_file(self.trashinfo_from_index(index), contents)
Esempio n. 23
0
def page_correct(url):
    """Store the recalled '.correct' text for *url* under this host's tree, if any; return it."""
    correct_text = recall(url + '.correct')
    # Guard clause: nothing recalled means nothing to persist.
    if not correct_text:
        return correct_text
    write_file(join(environ['pt'], gethostname(), url), [correct_text])
    return correct_text
Esempio n. 24
0
def put_domain_doc(doc):
    """Persist incoming input at the mapped document path for *doc*."""
    destination = map_doc_path(doc)
    contents = read_input()
    write_file(destination, contents)
Esempio n. 25
0
def get_code(testname):
    """Re-save and return the stored .tst source for *testname*."""
    tst_path = tname(testname) + '.tst'
    return write_file(tst_path, recall(tst_path))
Esempio n. 26
0
 def having_a_trashed_file(self, name, date):
     """Create a trashinfo for *name* recorded as deleted at midnight on *date*."""
     write_file(self.trashinfo(name), "DeletionDate=%sT00:00:00\n" % date)
    def test_should_list_a_trashinfo(self):
        """A single .trashinfo file should be the only listing result."""
        write_file("sandbox/info/foo.trashinfo")

        listed = self.list_trashinfos()

        expected = ["sandbox/info/foo.trashinfo"]
        assert_equals(expected, listed)
Esempio n. 28
0
def get_code(testname):
    """Write the recalled test source back to its .tst file and return the result."""
    source_path = tname(testname) + '.tst'
    stored = recall(source_path)
    return write_file(source_path, stored)
Esempio n. 29
0
 def having_a_trashed_file(self, name, date):
     """Write a DeletionDate-only trashinfo for *name*, dated *date*."""
     contents = "DeletionDate=%sT00:00:00\n" % date
     info_path = self.trashinfo(name)
     write_file(info_path, contents)
Esempio n. 30
0
 def write_context(self):
     # Persist a pickled snapshot of the simple scalar locals to the
     # configured context file; a no-op when no file is configured.
     if self.context_file:
         # Only keep plain scalar values (int/float/str/bool).
         # NOTE(review): dict.iteritems() exists only in Python 2 — confirm
         # this module is not expected to run under Python 3.
         atoms ={key: val for key, val in self.locals.iteritems()
                 if type(val) in {int, float, str, bool}}
         write_file(self.context_file, pickle.dumps(atoms))
Esempio n. 31
0
def test_file_write():
    """Appending a line should make it the file's last entry when read back."""
    files.write_file("test_file.txt", "\nnew line")
    lines = files.read_file("test_file.txt")
    assert lines[-1] == "new line"
Esempio n. 32
0
 def add_trashinfo_for(self, index, path):
     """Create the indexed trashinfo file recording *path* as the origin."""
     info_file = self.trashinfo_from_index(index)
     write_file(info_file, a_trashinfo_with_path(path))
Esempio n. 33
0
 def add_trashinfo(self,
                   path='unspecified/original/location',
                   trashinfo_path=None,
                   trashdir='sandbox/Trash'):
     """Write a trashinfo for *path*; default the info file into *trashdir* when not given."""
     if not trashinfo_path:
         trashinfo_path = self._trashinfo_path(trashdir)
     write_file(trashinfo_path, a_trashinfo_with_path(path))
Esempio n. 34
0
def correct(testname):
    """Publish the recalled '.correct' text for *testname* under this host, if present; return it."""
    correct_text = recall(tname(testname) + '.correct')
    if correct_text:
        destination = join(environ['pt'], gethostname(), testname)
        write_file(destination, [correct_text])
    return correct_text
Esempio n. 35
0
def save_table(filename, table):
    """Serialize each row of *table* as text and write the rows to *filename*."""
    rows_as_text = map(row_as_text, table)
    write_file(filename, rows_as_text)
Esempio n. 36
0
 def add_trashinfo(self, path='unspecified/original/location',
                         trashinfo_path=None,
                         trashdir='sandbox/Trash'):
     """Record *path* in a trashinfo file, deriving a default location when none is given."""
     target = trashinfo_path or self._trashinfo_path(trashdir)
     write_file(target, a_trashinfo_with_path(path))
    def test_should_ignore_non_trashinfo(self):
        """Files without the .trashinfo suffix must not appear in the listing."""
        write_file("sandbox/info/not-a-trashinfo")
        listed = self.list_trashinfos()
        assert_equals([], listed)
Esempio n. 38
0
def put_page(host, user, path, text=None):
    """Store a page document; content comes from *text* lines or, failing that, from input."""
    log_page('put '+host+' '+user+' '+path)
    destination = doc_path(host, user, path)
    contents = text.split('\n') if text else read_input()
    write_file(destination, contents)
    def test_should_list_a_trashinfo(self):
        """Exactly one trashinfo file yields a one-element listing."""
        write_file('sandbox/info/foo.trashinfo')
        listed = self.list_trashinfos()
        assert_equals(['sandbox/info/foo.trashinfo'], listed)
Esempio n. 40
0
def test_user_file():
    """Write one user row, then print every row's fields space-joined."""
    write_file('user.csv', 'Sam, [email protected]')
    for row in user_list():
        print('    '.join(row))
    def test_should_ignore_non_trashinfo(self):
        """Non-trashinfo entries in the info dir are filtered out of the listing."""
        write_file('sandbox/info/not-a-trashinfo')

        result = self.list_trashinfos()

        # Nothing should be listed for a file lacking the .trashinfo suffix.
        assert_equals([], result)
    def test_non_trashinfo_should_reported_as_a_warn(self):
        """A stray non-trashinfo file should trigger the warning log call."""
        write_file("sandbox/info/not-a-trashinfo")
        self.list_trashinfos()
        self.logger.warning.assert_called_with("Non .trashinfo file in info dir")
    def test_non_trashinfo_should_reported_as_a_warn(self):
        """Listing warns when the info dir contains a non-trashinfo file."""
        write_file('sandbox/info/not-a-trashinfo')

        self.list_trashinfos()

        # The lister must emit exactly this warning for the stray file.
        self.logger.warning.assert_called_with('Non .trashinfo file in info dir')
Esempio n. 44
0
def doc_put(doc):
    """Save the incoming input at *doc*'s resolved path."""
    destination = doc_path(doc)
    write_file(destination, read_input())
Esempio n. 45
0
def match_times_channel(house_or_times,
                        channel_or_data,
                        path=PATH,
                        threshold=THRESHOLD,
                        input_ftype=INPUT_FTYPE,
                        output_ftype=OUTPUT_FTYPE,
                        mean=False,
                        scale=1):
    """
    Preprocesses the data in the given channel of the given house, saves the parts
    of the interpolated data where there is data present from both the mains channel
    and the actual channel

    When input_ftype is set, the positional arguments are (house, channel)
    and the channel plus mains (channels 1 and 2) are loaded from files;
    otherwise the arguments carry the time/data arrays directly, optionally
    followed by house and channel labels.  When output_ftype is set the
    matched segments are written to the channel's _preprocessed directory,
    otherwise the list of (times_c, data_c, times_m, data_m) tuples is
    returned.  `mean` (bool used as a 0/1 multiplier) controls whether the
    mains mean is subtracted; `scale` divides both data arrays.
    """

    # "dat" output is explicitly unsupported for this function.
    if output_ftype == "dat": raise ValueError

    if input_ftype:
        # Load the target channel and both mains channels from disk.
        house, channel = house_or_times, channel_or_data
        times_c, data_c = read_file(
            path + f"/house_{house}/channel_{channel}.{input_ftype}",
            input_ftype)
        times_m, data_m_1 = read_file(
            path + f"/house_{house}/channel_1.{input_ftype}", input_ftype)
        _, data_m_2 = read_file(
            path + f"/house_{house}/channel_2.{input_ftype}", input_ftype)

        # Total mains consumption is the sum of the two mains channels.
        data_m = data_m_1 + data_m_2

        # Sort both series so timestamps are non-decreasing.
        times_c, data_c = remove_negative_diffs(times_c,
                                                data_c,
                                                input_ftype=None,
                                                output_ftype=None)
        times_m, data_m = remove_negative_diffs(times_m,
                                                data_m,
                                                input_ftype=None,
                                                output_ftype=None)

        # Interpolate small gaps and re-join the resulting segments.
        times_c, data_c = join_results(
            interpolate_channel(times_c,
                                data_c,
                                threshold=threshold,
                                input_ftype=None,
                                output_ftype=None))
        times_m, data_m = join_results(
            interpolate_channel(times_m,
                                data_m,
                                threshold=threshold,
                                input_ftype=None,
                                output_ftype=None))

    else:
        # In-memory mode: arrays come in directly, labels are optional extras.
        times_c, times_m, *house = house_or_times
        data_c, data_m, *channel = channel_or_data

        if not house:
            house = "undefined"
        else:
            house = house[0]

        if not channel:
            channel = "undefined"
        else:
            channel = channel[0]

    # Intervals where either series has a gap larger than the threshold.
    skipped_c = skipped_time(times_c, threshold=THRESHOLD)
    skipped_m = skipped_time(times_m, threshold=THRESHOLD)

    begin_times = [times_c[0], times_m[0]]
    end_times = [times_c[-1], times_m[-1]]

    # Merge overlapping skipped intervals from both series into one list.
    skipped = sorted(skipped_c + skipped_m)
    i = 0
    while i < len(skipped) - 1:
        (b1, e1), (b2, e2) = skipped[i:i + 2]
        if b2 < e1:
            # Overlap
            skipped[i:i + 2] = [(b1, max(e1, e2))]
        else:
            i += 1

    # Collect matched segments; start both cursors at the later series start.
    l = []
    ind_c_e = np.argmax(times_c >= max(begin_times))
    ind_m_e = np.argmax(times_m >= max(begin_times))
    for b, e in skipped:

        # Segment ends where the next skipped interval begins.
        ind_c_b = np.argmax(times_c > b)
        ind_m_b = np.argmax(times_m > b)

        times_c_slice = times_c[ind_c_e:ind_c_b]
        data_c_slice = data_c[ind_c_e:ind_c_b]
        times_m_slice = times_m[ind_m_e:ind_m_b]
        data_m_slice = data_m[ind_m_e:ind_m_b]

        # Keep only segments where both series actually have samples.
        if times_c_slice.size and times_m_slice.size:
            # `mean` is 0 or 1: subtract the mains mean only when requested.
            l.append((np.copy(times_c_slice), np.copy(data_c_slice) / scale,
                      np.copy(times_m_slice),
                      (np.copy(data_m_slice) - np.mean(data_m_slice) * mean) /
                      scale))

        # Next segment starts after the skipped interval ends.
        ind_c_e = np.argmax(times_c >= e)
        ind_m_e = np.argmax(times_m >= e)

    # Final segment: from the last skipped interval to the earlier series end.
    c_min_end = np.argmax(times_c >= min(end_times))
    m_min_end = np.argmax(times_m >= min(end_times))
    times_c_slice = times_c[ind_c_e:c_min_end + 1]
    data_c_slice = data_c[ind_c_e:c_min_end + 1]
    times_m_slice = times_m[ind_m_e:m_min_end + 1]
    data_m_slice = data_m[ind_m_e:m_min_end + 1]

    l.append((np.copy(times_c_slice), np.copy(data_c_slice) / scale,
              np.copy(times_m_slice),
              (np.copy(data_m_slice) - np.mean(data_m_slice) * mean) / scale))

    if output_ftype:
        # Write each matched segment as its own numbered file.
        for file_num, (tcs, dcs, tms, dms) in enumerate(l):
            # Create the _preprocessed directory on first use.
            if not os.path.exists(
                    path + f"/house_{house}/channel_{channel}_preprocessed"):
                os.mkdir(path +
                         f"/house_{house}/channel_{channel}_preprocessed")

            # Channel data and mains data are concatenated into one array.
            data_array = np.append(dcs, dms)

            write_file(
                path +
                f"/house_{house}/channel_{channel}_preprocessed/file_{file_num}.npy",
                tcs, data_array, output_ftype)

    else:
        return l
Esempio n. 46
0
def save_table(filename, table):
    """Convert every row of *table* to its text form and persist the result."""
    textual_rows = map(row_as_text, table)
    write_file(filename, textual_rows)
Esempio n. 47
0
def correct(testname):
    """If a '.correct' answer was recalled for *testname*, publish it; return it either way."""
    correct_text = recall(tname(testname)+'.correct')
    # Guard clause: nothing recalled, nothing to write.
    if not correct_text:
        return correct_text
    write_file(join(environ['pt'],gethostname(),testname), [correct_text])
    return correct_text
Esempio n. 48
0
def put_page(host, user, path, text=None):
    """Log and store a page; content comes from *text* lines or else from input."""
    log_page('put '+host+' '+user+' '+path)
    # Early-exit path: no explicit text means the content is read from input.
    if not text:
        write_file(doc_path(host, user, path), read_input())
        return
    write_file(doc_path(host, user, path), text.split('\n'))
Esempio n. 49
0
def page_correct(url):
    """Return the recalled correct page text for *url*, persisting it when found."""
    correct_text = recall(url+'.correct')
    if correct_text:
        target = join(environ['pt'], gethostname(), url)
        write_file(target, [correct_text])
    return correct_text