Example #1
 def create(self, validated_data):
     """
     Create a new `Hero` instance
     :param validated_data: request data
     :return: new `Hero` instance
     """
     validated_data['path'] = get_path(validated_data['army_name'])
     return Hero.objects.create(**validated_data)
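     # (a Django REST Framework-style serializer create(): get_path apparently
     #  derives the new hero's path from its army name before the row is saved)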
Example #2
 def __init__(self, path="database/progress.dat"):
     self.path = get_path(path)
     try:
         with open(self.path, "r") as handler:
             self.progress = float(handler.read())
     except Exception:
         self.progress = 0
Example #3
def path():
    waypoints = request.args.get('waypoints')

    waypoints = json.loads(waypoints)

    path = get_path(waypoints)

    return jsonify(path)
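# (a Flask-style view: `request` and `jsonify` are assumed to come from flask,
#  and the function is presumably registered with an @app.route decorator)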
Example #4
def start_server():
    application = tornado.web.Application(
        [(r"/", IndexHandler), (r"/progress", ProgressHandler),
         (r"/data", DataHandler),
         (r"/css/(.*)", tornado.web.StaticFileHandler, {
             "path": get_path("resources/css")
         })],
        debug=True)
    application.listen(10317)
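    # note: assumes the Tornado IOLoop is started elsewhere,
    # e.g. tornado.ioloop.IOLoop.current().start()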
Example #5
def get_path_str(request, info):
    """
    This function returns the army unit's path
    :param request: request
    :param info: army unit name
    :return: JSON with path
    """
    try:
        return Response(get_path(info))
    except Exception:
        return Response(status=status.HTTP_404_NOT_FOUND)
Example #6
def get_path_pk(request, pk):
    """
    This function returns the army's path
    :param request: request
    :param pk: object id
    :return: path
    """
    try:
        # a missing pk raises Hero.DoesNotExist, which should also yield a 404
        info = Hero.objects.get(pk=pk).army_name
        return Response(get_path(info))
    except Exception:
        return Response(status=status.HTTP_404_NOT_FOUND)
Example #7
    def search_granules(self, srcDir, sDTime, eDTime, BBox=[[-90,-180],[90,180]],  verbose=True):
        '''
        BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                    /* lon: -180 ~ 180 */
        '''
        srcPATH = get_path(srcDir, sDTime, eDTime)
        if len(srcPATH)==0:
            print "!"*50
            print "Warning     by %s"%(__file__.split("/")[-1])
            print "No file for the time [%s]-[%s]"%(sDTime,eDTime)
            print "in %s"%(srcDir)
            print "!"*50
            raise IOError
        # gtrkDim = [get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
        #                    for path in srcPATH]
        gtrkDim = [get_gtrack_dim(path, self.func_read_vs, self.cached, self.cacheDir, verbose=verbose)
                           for path in srcPATH]

        DTime, Lat, Lon   = zip(*gtrkDim)
        Granule           = deque([])
        for dtime, lat, lon, path in zip(DTime, Lat, Lon, srcPATH):

            mskLat  = ma.masked_outside( lat, BBox[0][0], BBox[1][0] ).mask
            mskLon  = ma.masked_outside( lon, BBox[0][1], BBox[1][1] ).mask
            mskTime = ma.masked_outside( dtime, sDTime, eDTime).mask

            #mask    = (mskLat + mskLon).all(1) + mskTime
            mask    = (mskLat + mskLon).all(0) + mskTime

            if not mask.all():
                idx = ma.array( arange(dtime.size), "int", mask=mask).compressed()
                Granule.append([path,
                                dtime[idx],
                                lat[idx],
                                lon[idx],
                                idx
                                ])
                if verbose:
                    print('* [V] ground track dimension (%s): %s' % (self.cached, path))
            else:
                if verbose:
                    print('* [_] ground track dimension (%s): %s' % (self.cached, path))

        summary = '| [{}] granules intersect domain {} out of [{}] total between ({}-{}) |\n'    \
                  .format( len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime )

        line    = '+' + '-'*len(summary[3:]) + '+\n'

        print(line + summary + line)

        return list(Granule)
Example #8
def test(book_id):
    analogy_path = get_path(book_id, "../../gutenberg_scrapped_analogies/"
                            ) + "book{}_analogies.csv".format(book_id)
    book_path = get_path(
        book_id, "../../gutenberg_scraps/") + "book{}.txt".format(book_id)
    analogies = pd.read_csv(analogy_path)
    analogies['text'] = analogies['text'].apply(lambda x: fix(x))
    with open(book_path, 'r') as book:
        text = book.read().replace("\n", ' ')
        text = fix(text)
        sentences = nltk.sent_tokenize(text)
    count = 0
    for i in analogies['text']:
        print(i)


#         prev, after = get_context(i['name'])
#         print(prev)
#         print(i['text'])
#         print(after)

#test(16)
Example #9
    def search_granules(self,
                        srcDir,
                        sDTime,
                        eDTime,
                        BBox=[[-90, -180], [90, 180]],
                        thresh=0.001):
        '''
        BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                    /* lon: -180 ~ 180 */
        '''

        srcPATH = get_path(srcDir, sDTime, eDTime)

        gtrkDim = [
            get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
            for path in srcPATH
        ]

        DTime, Lat, Lon = zip(*gtrkDim)

        Granule = []
        for dtime, lat, lon, path in zip(DTime, Lat, Lon, srcPATH):

            mskLat = ma.masked_outside(lat, BBox[0][0], BBox[1][0]).mask
            mskLon = ma.masked_outside(lon, BBox[0][1], BBox[1][1]).mask
            mskTime = ma.masked_outside(dtime, sDTime, eDTime).mask

            #mask    = (mskLat + mskLon).any(1) + mskTime
            mask = (mskLat + mskLon).all(1) + mskTime

            if not mask.all():

                idx = ma.array(arange(dtime.size), 'int',
                               mask=mask).compressed()
                Granule.append([path, dtime[idx], lat[idx], lon[idx], idx])

                print('* [V] ground track dimension (%s): %s' % (self.cached, path))

            else:
                print('* [_] ground track dimension (%s): %s' % (self.cached, path))

        summary = '| [{}] granules intersect domain {} out of [{}] total between ({}-{}) |\n'    \
                  .format( len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime )

        line = '+' + '-' * len(summary[3:]) + '+\n'

        print(line + summary + line)

        return Granule
Example #10
    def tasks():

        # Import registry keys for creating Disk Cleanup option 64 which has all cleanup options selected

        output_progress(arguments_received, 'Importing registry key settings for configuring Disk Cleanup options', log_name)
        reg_keys = get_path(r'win_optimizer\resources\clean_mgr_reg_keys.reg') # Path within the project files to the registry key file to import
        print()
        subprocess.call(['reg', 'import', reg_keys]) # Import registry keys for creating Disk Cleanup Option 64

        # Run Disk Cleanup with our selected options

        output_progress(arguments_received, 'Running Disk Cleanup', log_name)
        clean_cmd = "cleanmgr /sagerun:64"
        subprocess.call(['powershell.exe', clean_cmd]) # Output results to console
Example #11
def update_all_path(request):
    """
    This function updates the path field for all heroes
    :param request: request
    :return: 200 if OK, 503 if FAIL
    """
    heroes = Hero.objects.all()
    try:
        for hero in heroes:
            hero.path = get_path(hero.army_name)
            hero.save(force_update=True)
        return Response(status=status.HTTP_200_OK)
    except Exception:
        return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
Example #12
    def search_granules(self, srcDir, sDTime, eDTime, BBox=[[-90,-180],[90,180]], thresh=0.001):
        '''
        BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                    /* lon: -180 ~ 180 */
        '''

        srcPATH = get_path(srcDir, sDTime, eDTime)

        gtrkDim = [get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
                                                                for path in srcPATH]

        DTime, Lat, Lon     = zip(*gtrkDim)

        Granule     = []
        for dtime, lat, lon, path in zip(DTime, Lat, Lon, srcPATH):

            mskLat  = ma.masked_outside( lat, BBox[0][0], BBox[1][0] ).mask
            mskLon  = ma.masked_outside( lon, BBox[0][1], BBox[1][1] ).mask
            mskTime = ma.masked_outside( dtime, sDTime, eDTime).mask

            #mask    = (mskLat + mskLon).any(1) + mskTime
            mask    = (mskLat + mskLon).all(1) + mskTime

            if not mask.all():

                idx = ma.array( arange(dtime.size), 'int', mask=mask).compressed()
                Granule.append([path,
                                dtime[idx],
                                lat[idx],
                                lon[idx],
                                idx
                                ])

                print('* [V] ground track dimension (%s): %s' % (self.cached, path))

            else:
                print('* [_] ground track dimension (%s): %s' % (self.cached, path))

        summary = '| [{}] granules intersect domain {} out of [{}] total between ({}-{}) |\n'    \
                  .format( len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime )

        line    = '+' + '-'*len(summary[3:]) + '+\n'

        print(line + summary + line)

        return Granule
Example #13
def write_analogies(book_id):

    out_path = get_path(book_id, out_dir)
    book_id = str(book_id)
    txt_file_name = out_path
    csv_file_name = out_path
    output_handler = open(txt_file_name + "book%s_analogies.txt" % book_id,
                          "w",
                          encoding="utf-8")
    # Find the indices of all paragraphs that contain the patterns as listed in analogy_string_list
    paras = text_to_paras(book_id)
    para_indices = find_any_patterns(paras, analogy_string_list)
    ids = {}  # save sentences' ids in hash table to prevent duplicates.

    # Extract the exact sentences and write them to csv and txt files.
    with open(csv_file_name + "book%s_analogies.csv" % book_id,
              'w',
              encoding="utf-8") as csvfile:
        fieldnames = ['name', 'text']
        writer = csv.DictWriter(csvfile,
                                fieldnames=fieldnames,
                                quoting=csv.QUOTE_ALL,
                                lineterminator='\n')
        writer.writeheader()
        for para_index in para_indices:
            sentence_pos = get_analogy_sentence(paras[para_index],
                                                analogy_string_list)
            # get_analogy_sentence returns a 2-element tuple. The first element is the analogy string,
            # the second is its sentence index within the paragraph.
            sentence = sentence_pos[0]
            sent_index = sentence_pos[1]
            if sentence != '':
                # Generate the ID of the sentence (e.g. [BRWN, PARA#1, SENT#1]).
                id_tag = "[" + book_id + ", PARA#" + str(
                    para_index) + ", SENT#" + str(sent_index) + "]"
                if id_tag not in ids:
                    ids[id_tag] = True
                    output_handler.write(id_tag + "\n")
                    output_handler.write(sentence + "\n")
                    writer.writerow({'name': id_tag, 'text': sentence})
    output_handler.close()
Example #14
def scrap_book_num(book_num):
    book_num = str(book_num)
    book_url = URL + book_num
    web_request = requests.get(book_url)
    web_html = web_request.text
    soup = BeautifulSoup(web_html, "html.parser")
    links = soup.find_all('a', class_="link")
    out_path = get_path(book_num, output_dir)
    for link in links:
        if "Text" in link.text:
            get_book = link.get('href')
            request = requests.get("https:" + get_book)
            # "../" is the parent directory; edit the following line to change
            # the output destination
            with open(out_path + "book" + book_num + ".txt",
                      'wb') as open_file:
                for chunk in request.iter_content(10000):
                    open_file.write(chunk)
            print("book%s" % book_num + "worked")
            open_file.close()
Example #15
 def load_data_source(self,
                      path="database/source",
                      progress: Progress = None):
     c = self.cursor()
     path = get_path(path)
     for root, dirs, files in os.walk(path):
         if not files:
             continue  # skip empty directories (avoids dividing by zero below)
         number = len(files)
         progress_step = 1 / number
         progress_value = 0
         if progress:
             progress.set(progress_value)
         for filename in files:
             obj = DataSource(filename)
             date = obj.get_date()
             data = obj.get_data()
             sql = "INSERT INTO `data` VALUES ('%s', ?, ?, ?, ?, ?)" % date
             c.executemany(sql, data)
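             # executemany expects `data` to be an iterable of row tuples,
             # one value per "?" placeholder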
             self.commit()
             progress_value += progress_step
             if progress:
                 progress.set(progress_value)
     if progress:
         progress.set(1)
Example #16
def get_random_analogy():
    book_id = random.randint(0,NUM_OF_BOOKS)
    path = get_path(book_id, analogy_path)
    analogies = path + "book%s_analogies.csv" % book_id
    #if the randomly generated path does not exist
    try:
        df = pd.read_csv(analogies)
    except Exception:
        #print("{} does not exist".format(book_id))
        return get_random_analogy()
    #fixes the punctuation
    df['text'] = df['text'].apply(lambda x: fix(x))
    #if file selected has no analogies in it, call the function again to pick a different analogy
    if len(df) == 0:
        #print("no analogies here")
        return get_random_analogy()
    #pick a random analogy
    analogy = df.sample(n=1, random_state=1)
    #check that "like" is not used as a verb, i.e. that the match is a real analogy
    if like_is_verb(str(analogy['text'])):
        #print("like is a verb")
        return get_random_analogy()
    #adds the context to the dataframe
    #to change values about this, including how many sentences before and after and input directory,
    #go to context.py.
    name = analogy['name'].to_string(index = False)
    try:
        prev, after = get_context(name, book_path)
    except Exception as e:
       # print("get context failed because:")
        #print(e)
        return get_random_analogy()
    analogy.insert(1,'prev_sent',prev)
    analogy.insert(3, 'next_sent', after)
    print("analogy returned")
    return analogy
Example #17
 def __init__(self):
     self.path = get_path()
     self.stdout = PIPE
     self.stdin = PIPE
Example #18
from get_path import get_path

path = get_path()
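# get_path() is assumed to return a directory path with a trailing separator,
# so that appending "../VERSION" points at the file one level up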

with open(path + "../VERSION", "r") as f:
    version = f.read()

print(version)
Example #19
 def get(self):
     self.render(get_path("resources/index.html"))
Example #20
import os
from tishi import *
import time

import get_path
from imp import reload
reload(get_path)
f_p = get_path.get_path()


def write_file(l, mod, f_path=f_p):
    '''
    Write the records to a file
    '''
    #	print('writing file:', f_path)
    #	f_path = str(os.getcwd())+'/student_info.txt'

    # each record d is a dict keyed by the Chinese words for
    # name (姓名), age (年龄) and score (成绩)
    with open(f_path, mod) as fw:
        for d in l:
            fw.write('%s,%s,%s\n' % (d['姓名'], d['年龄'], d['成绩']))
    tishi('Saving file', 1)
    print('File saved to: %s' % f_path)
    time.sleep(1)
Example #21
def text_to_paras(name, book_dir):
    book_id = get_book_id(name)
    book_path = get_path(book_id, book_dir)
    corpus = PlaintextCorpusReader(book_path, '.*')
    paragraphs = corpus.paras('book%s.txt' % book_id)
    return paragraphs
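# (PlaintextCorpusReader is nltk.corpus.reader.PlaintextCorpusReader; the books
#  are stored as book<id>.txt inside book_dir)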
Example #22
 def pensar(self):
     self.path = get_path(level=self.level)
     print("path:", self.path)
Example #23
 def __init__(self, path, root="database/source"):
     self.path = get_path(root + "/" + path)
     self.filename = self.path.split(os.path.sep)[-1]
Example #24
 def create_tables(self):
     path = get_path("database/default.sql")
     with open(path, "rb") as f:
         sql = f.read().decode()
     self.cursor().execute(sql)
     self.commit()
Example #25
 def __init__(self, path="database/default.db"):
     self.path = get_path(path)
     self.conn = sqlite3.connect(self.path)
Example #26
from get_path import get_path
from create_csv import create_csv

path_source = '/Users/macbookpro/Documents/project/dataset/'
path_destination = '/Users/macbookpro/Documents/project/dataset/csv/'

file_path = get_path(path_source)
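# get_path appears to return the list of dataset files under path_source; each
# one is converted to a CSV in path_destination by create_csv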

for file in file_path:
    table = create_csv(file, path_destination)
Example #27
def clean(begin, end, output_dir):
    # use call() so the chmod completes before the cleaner script is invoked
    call(["bash", "-c", "chmod +x text_cleaner.sh"])
    for i in range(begin, end + 1, 100):
        clean_path = get_path(i, output_dir)
        call(["bash", "./text_cleaner.sh", clean_path])
        print("cleaned", clean_path)
Example #28
def text_to_paras(book_id):
    # book_dir is the directory containing the .txt files
    book_path = get_path(book_id, book_dir)
    corpus = PlaintextCorpusReader(book_path, '.*')
    paragraphs = corpus.paras('book%s.txt' % book_id)
    return paragraphs