Example #1
File: forum.py Project: kq2/Ricin
def find_threads(course, forum_folder, forum_id):
    """
    Find all threads in current forum.
    Note: forum 0 has every thread!
    """
    # download the 1st page of the given forum
    query = 'sort=firstposted&page=1'
    url = '{}/api/forum/forums/{}/threads?{}'
    url = url.format(course.get_url(), forum_id, query)
    path = forum_folder + '/temp.json'
    util.download(url, path, course.get_cookie_file())

    # download a huge page with all threads
    forum = util.read_json(path)
    num_threads = forum['total_threads']
    url += '&page_size={}'.format(num_threads)
    util.download(url, path, course.get_cookie_file())

    # add each thread's id to forum info
    threads = util.read_json(path)['threads']
    util.remove(path)

    path = forum_folder + '/info.json'
    forum = util.read_json(path)

    forum_threads = []
    for thread in reversed(threads):
        forum_threads.append({'id': thread['id']})

    forum['num_threads'] = num_threads
    forum['threads'] = forum_threads

    util.write_json(path, forum)
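Note: util in these examples is a project-local helper module (here from kq2/Ricin), not the standard library. Judging only from the calls above, its file helpers plausibly look like the minimal sketch below; the bodies are assumptions, not the project's actual implementation.

import os
import json

def read_json(path):
    # Assumed behavior: parse a JSON file and return the object.
    with open(path) as f:
        return json.load(f)

def write_json(path, data):
    # Assumed behavior: serialize data to a JSON file.
    with open(path, 'w') as f:
        json.dump(data, f, indent=4)

def remove(path):
    # Assumed behavior: delete a file if it exists.
    if os.path.exists(path):
        os.remove(path)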
Example #2
def _find_files(url, folder, cookie):
    """
    Recursively find all files on the current page.
    :param url: URL of the given page.
    :param folder: Destination folder for this page.
    :param cookie: Cookie file used for downloading.
    :return: A list of (URL, path) file tuples found on the page.
    """
    files = []

    path = '{}/temp.html'.format(folder)
    util.download(url, path, cookie)

    page = util.read_file(path)
    util.remove(path)

    # recursively find all files in sub-folders
    pattern = r'<tr><td colspan="4"><a href="(.*?)">(.*?)</a>'
    for find in re.finditer(pattern, page, re.DOTALL):
        url = find.group(1)
        sub_folder = '{}/{}'.format(folder, find.group(2))
        files += _find_files(url, sub_folder, cookie)

    # find all files in this page
    pattern = r'<tr><td>(.*?)</td>.*?Embed.*?<a href="(.*?)\?.*?">Download</a>'
    for find in re.finditer(pattern, page, re.DOTALL):
        url = find.group(2)
        file_name = find.group(1)
        path = u'{}/{}'.format(folder, file_name)
        files.append((url, path))

    return files
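Because _find_files only collects (url, path) pairs, the caller decides when to fetch them. A hypothetical invocation (the root URL, folder, and cookie file are placeholders):

files = _find_files('https://example.com/files/', 'downloads', 'cookies.txt')
for url, path in files:
    util.download(url, path, 'cookies.txt')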
Example #3
    def link(self, args):
        """
        Actually deploy the configuration by linking the correct spot to the original file.
        @param args: The parsed arguments from the command-line.
        """
        if args.dry:
            print(str(self.source) + " -> " + str(self.destination))
            return

        if not self.src_exists:
            return

        if self.dst_exists_before:
            if args.replace:
                if self.dst_is_link_before:
                    util.unlink(self.destination)
                elif self.dst_is_file_before:
                    util.remove(self.destination)
                elif self.dst_is_dir_before:
                    util.remove_dir(self.destination)
                else:
                    # Should never get here: destination exists but is not a link, file, or directory.
                    raise Exception("Unexpected destination type")
            else:
                # File already exists and isn't going to be replaced.
                return
        else:
            # Destination does not exist, but a dangling (broken) symlink may still occupy the path.
            if self.dst_is_link_before:
                util.unlink(self.destination)

        if args.copy:
            util.copy(self.source, self.destination)
        else:
            util.link(self.source, self.destination)
Example #4
def get_data(api):
    response = 'temp.json'
    url = 'https://mediacosmos.rice.edu/api/' + api
    cmd = 'curl "{}" -o "{}" --cookie {}'.format(url, response, COOKIE)
    os.system(cmd)

    data = util.read_json(response)
    util.remove(response)
    return data
Example #5
def calc_nodata_9999_lineage(stacking, band_name, clip_extents, tile_id,
                             rename, workdir):
    """Clip scenes which have data outside the lineage, apply -9999 fill."""
    logger.info('     Start processing for band: %s', band_name)

    mosaic_filename = os.path.join(workdir, tile_id,
                                   tile_id + '_' + rename + '.tif')

    if os.path.exists(mosaic_filename):
        logger.warning("Skip previously generated result %s", mosaic_filename)
        return mosaic_filename

    temp_clipped_names = list()
    temp_masked_names = list()
    for level, stack in reversed(list(enumerate(stacking, start=1))):
        scene_name = util.ffind(workdir, stack['LANDSAT_PRODUCT_ID'],
                                '*' + band_name + '.tif')

        temp_name1 = mosaic_filename.replace('.tif',
                                             '_temp%d' % level + '.tif')
        temp_warp_cmd = ('gdalwarp -te {extents}'
                         ' -dstnodata "-9999" -srcnodata "-9999" {0} {1}')
        util.execute_cmd(
            temp_warp_cmd.format(scene_name, temp_name1, extents=clip_extents))
        temp_clipped_names.append(temp_name1)

        lineg_name = util.ffind(workdir, tile_id, '*LINEAGEQA.tif')
        temp_name2 = mosaic_filename.replace('.tif',
                                             '_temp%dM' % level + '.tif')
        temp_calc_cmd = ('gdal_calc.py -A {0} -B {lineage} --outfile {1}'
                         ' --calc="(A*(B=={level}) + (-9999*(B!={level})))"'
                         ' --NoDataValue=-9999')
        util.execute_cmd(
            temp_calc_cmd.format(temp_name1,
                                 temp_name2,
                                 lineage=lineg_name,
                                 level=level))
        temp_masked_names.append(temp_name2)

    temp_name = mosaic_filename.replace('.tif', '_temp.tif')
    temp_warp_cmd = 'gdalwarp {} {}'.format(' '.join(temp_masked_names),
                                            temp_name)
    util.execute_cmd(temp_warp_cmd)
    util.remove(*temp_masked_names + temp_clipped_names)

    warp_cmd = (
        'gdalwarp -dstnodata "-9999" -srcnodata "-9999" -co "compress=deflate"'
        ' -co "zlevel=9" -co "tiled=yes" -co "predictor=2" {} {}')
    util.execute_cmd(warp_cmd.format(temp_name, mosaic_filename))
    util.remove(temp_name)

    logger.info('    End processing for %s as %s ', band_name, mosaic_filename)
    if not os.path.exists(mosaic_filename):
        logger.error('Processing failed to generate desired output: %s',
                     mosaic_filename)
    return mosaic_filename
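For clarity, one iteration of the gdal_calc.py step above expands to a command like the following (file names and level=2 are illustrative placeholders). It keeps each pixel where the lineage band B equals the stacking level and writes -9999 fill everywhere else:

gdal_calc.py -A tile_temp2.tif -B tile_LINEAGEQA.tif --outfile tile_temp2M.tif --calc="(A*(B==2) + (-9999*(B!=2)))" --NoDataValue=-9999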
Example #6
def cleanup():
    for tdir in dumpscreen_window_dirs:
        removeit(tdir)

    # remove old files in "___source" directory
    import glob
    import time
    now = time.time()
    for f in glob.glob(os.path.join(tmpdir_source, '*')):
        if now - os.stat(f).st_mtime > 30:
            remove(f)
Example #7
def process_lineage_contributing(lineage_filename, n_contrib_scenes):
    """Check historgram for count of scenes which were not all-fill."""
    logger.info('    Start checking contributing scenes')

    info_cmd = 'gdalinfo -hist {}'
    results = util.execute_cmd(info_cmd.format(lineage_filename))
    # TODO: could potentially use this instead...
    util.remove(lineage_filename + '.aux.xml')
    count, array = geofuncs.parse_gdal_hist_output(results['output'])

    logger.info(
        'Parsing histogram from lineage file found %d'
        ' contributing scenes', count)
    if count == 0:
        logger.warning('Found all fill lineage, tile not needed!')
        raise ArdTileNotNeededException()

    # decrement pixel values in lineage file if some scenes didn't contribute
    # any pixels
    if count != n_contrib_scenes:
        delta = n_contrib_scenes - count

        # Determine whether we need to decrement the pixel
        # values in the lineage file.
        cmd = ''
        if delta == 1:
            if array[0] == 0:
                cmd = ' --calc="A-' + str(delta) + '"'
            elif array[1] == 0 and array[2] > 0:
                cmd = ' --calc="A-(A==3)"'
        elif delta == 2:
            if array[0] == 0 and array[1] == 0:
                cmd = ' --calc="A-' + str(delta) + '"'
            elif array[0] == 0 and array[2] == 0:
                cmd = ' --calc="A-' + str(1) + '"'

        if cmd != '':
            temp_name = lineage_filename.replace('.tif', '_linTemp.tif')
            calc_cmd = ('gdal_calc.py -A {lineage} --outfile {temp} {calc}'
                        ' --type="Byte" --NoDataValue=0 --overwrite')
            util.execute_cmd(
                calc_cmd.format(lineage=lineage_filename,
                                temp=temp_name,
                                calc=cmd))

            # compress
            warp_cmd = ('gdalwarp -co "compress=deflate" -co "zlevel=9"'
                        ' -co "tiled=yes" -co "predictor=2"'
                        ' -overwrite {} {}')
            util.execute_cmd(warp_cmd.format(temp_name, lineage_filename))
            util.remove(temp_name)

    logger.info('Finished updating contributing scenes')
    return count
Example #8
def cleanup():
    for tdir in dumpscreen_window_dirs:
        removeit(tdir)

    # remove old files in "___source" directory
    import glob
    import time
    now = time.time()
    for f in glob.glob(os.path.join(tmpdir_source, '*')):
        if now - os.stat(f).st_mtime > 30:
            remove(f)
Example #9
    def download_email_blacklist(self):
        url = self.url + '/data/export/pii'
        path = self.info_folder + '/temp.html'
        util.download(url, path, self.cookie_file)

        content = util.read_file(path)
        pattern = r'href="(https://coursera-reports.*?)"'
        url = re.search(pattern, content).group(1)

        util.remove(path)
        path = self.info_folder + '/email_blacklist.csv'
        util.download(url, path, self.cookie_file)
Example #10
def rm_identity(computations):
    """ Remove or reduce one identity """
    for c in computations:
        if isinstance(c, Identity):
            others = remove(c.__eq__, computations)
            other_vars = reduce(set.union, [o.variables for o in others], set())
            vars = remove(other_vars.__contains__, c.outputs)
            if not vars:
                return others
            if tuple(vars) != c.outputs:
                newident = Identity(*vars)
                return (newident,) + tuple(others)
    return computations
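Note that remove here is not the file-deletion helper seen elsewhere on this page: it is called as remove(predicate, sequence) and filters matching elements out, in the style of toolz.remove. A minimal sketch of that assumed helper (returning a list matters, since others is iterated twice):

def remove(predicate, seq):
    # Assumed predicate-style remove: keep elements for which predicate is false.
    return [x for x in seq if not predicate(x)]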
Example #11
    def download_info(self):
        url = self.url
        temp = self.info_folder + '/temp.html'
        util.download(url, temp, self.cookie_file)
        page_html = util.read_file(temp)
        util.remove(temp)

        info_files = ['user.json', 'course.json', 'sidebar.json']
        matches = re.findall(r'JSON\.parse\("(.*?)"\);', page_html)
        for match, info_file in zip(matches, info_files)[1:]:
            info = util.unicode_unescape(match).replace('\\\\', '')
            path = '{}/{}'.format(self.info_folder, info_file)
            util.write_json(path, util.read_json(info, True))
Example #12
    def _input_outputs(self, canonicalize=identity):
        """ Find the inputs and outputs of the complete computation """
        allin = map(canonicalize, unique(chain(*[c.inputs
                                                for c in self.computations])))
        allout = map(canonicalize, unique(chain(*[c.outputs
                                                for c in self.computations])))

        inputs  = remove(allout.__contains__, allin)
        outputs = remove(allin.__contains__, allout)
        ident_inputs  = [i for c in self.computations if isinstance(c, Identity)
                           for i in c.inputs]
        ident_outputs = [o for c in self.computations if isinstance(c, Identity)
                           for o in c.outputs]
        return tuple(inputs + ident_inputs), tuple(outputs + ident_outputs)
Example #13
    def __scrollback_clean(self):
        """clean up scrollback files: remove empty lines at the beginning and at the end of a file"""

        for f in glob.glob(os.path.join(self.basedir, self.savedir,
                           "hardcopy.*")):
            try:
                ftmp = f + "_tmp"
                temp = open(ftmp, "w")
                thefile = open(f, 'r')
                beginning = True
                for line in thefile:
                    if beginning:
                        if cmp(line, '\n') == 0:
                            line = line.replace('\n', "")
                        else:
                            beginning = False
                    temp.write(line)
                temp.close()
                thefile.close()

                temp = open(ftmp, 'r')
                endmark = -1
                lockmark = False
                for (i, line) in enumerate(temp):
                    if cmp(line, '\n') == 0:
                        if not lockmark:
                            endmark = i
                            lockmark = True
                    else:
                        endmark = -1
                        lockmark = False
                temp.close()

                if endmark > 1:
                    thefile = open(f, "w")
                    temp = open(ftmp, 'r')
                    for (i, line) in enumerate(temp):
                        if i == endmark:
                            break
                        else:
                            thefile.write(line)
                    thefile.close()
                    temp.close()
                    util.remove(ftmp)
                else:
                    util.remove(f)
                    os.rename(ftmp, f)
            except:
                sys.stderr.write('Unable to clean scrollback file: ' + f + '\n')
Example #14
    def __scrollback_clean(self):
        """clean up scrollback files: remove empty lines at the beginning and at the end of a file"""

        for f in glob.glob(os.path.join(self.basedir, self.savedir, "hardcopy.*")):
            try:
                ftmp = f + "_tmp"
                temp = open(ftmp, "w")
                thefile = open(f, "r")
                beginning = True
                for line in thefile:
                    if beginning:
                        if cmp(line, "\n") == 0:
                            line = line.replace("\n", "")
                        else:
                            beginning = False
                    temp.write(line)
                temp.close()
                thefile.close()

                temp = open(ftmp, "r")
                endmark = -1
                lockmark = False
                for (i, line) in enumerate(temp):
                    if cmp(line, "\n") == 0:
                        if not lockmark:
                            endmark = i
                            lockmark = True
                    else:
                        endmark = -1
                        lockmark = False
                temp.close()

                if endmark > 1:
                    thefile = open(f, "w")
                    temp = open(ftmp, "r")
                    for (i, line) in enumerate(temp):
                        if i == endmark:
                            break
                        else:
                            thefile.write(line)
                    thefile.close()
                    temp.close()
                    util.remove(ftmp)
                else:
                    util.remove(f)
                    os.rename(ftmp, f)
            except:
                sys.stderr.write("Unable to clean scrollback file: " + f + "\n")
Example #15
    def __save_win(self,winid,ctype,pids_data,ctime,rollback):
        errors=[]
        fname=os.path.join(self.basedir,self.savedir,"win_"+winid)
        if rollback[1]:
            #time=linecache.getline(rollback[0],2).strip()
            #copy scrollback
            shutil.move(rollback[1],os.path.join(self.basedir,self.savedir,"hardcopy."+winid))

        basedata_len=7

        f=open(fname,"a")
        if rollback[0]:
            rollback_dir=rollback[2]
            target=rollback[0]
            fr=open(target,'r')
            last_sep=1
            for i,line in enumerate(fr.readlines()[basedata_len:]):
                f.write(line)
                if line=='-\n':
                    last_sep=i
                elif i-last_sep==6 and line.startswith('vim_'):
                    #import vim files but update the window number in filename
                    for filename in glob.glob(os.path.join(rollback_dir,line.strip()+'_*')):
                        try:
                            tvim="vim_W%s_%s"%(winid,os.path.basename(filename).split('_',2)[2])
                            tvim=os.path.join(self.basedir,self.savedir,tvim)
                            shutil.move(filename,tvim)
                        except:
                            errors.append('Unable to rollback vim: %s'%filename)
            util.remove(target)
        else:
            pids_data_len="0"
            if(pids_data):
                pids_data_len=str(len(pids_data))
            f.write(pids_data_len+'\n')
            if(pids_data):
                for pid in pids_data:
                    f.write("-\n")
                    for i,data in enumerate(pid):
                        if i == 2:
                            if data.endswith('\0\0'):
                                data=data[:len(data)-1]
                            f.write(str(len(data.split('\0'))-1)+'\n')
                            f.write(str(data)+'\n')
                        else:
                            f.write(str(data)+'\n')
                f.write(ctime)
        f.close()
        return errors
Example #16
def deleta_aluno(id_aluno):
    try:
        aluno = util.localiza(id_aluno, 'ALUNO')
        removido = util.remove(aluno, 'ALUNO')
        return jsonify(removido)
    except util.NotFoundError:
        return jsonify({'erro': 'aluno nao encontrado'}), 400
Example #17
def deleta_disciplina(id_disciplina):
    try: 
        disciplina = util.localiza(id_disciplina,'DISCIPLINA')
        removido = util.remove(disciplina,'DISCIPLINA')
        return jsonify(removido)
    except util.NotFoundError:
        return jsonify({'erro':'disciplina nao encontrada'}),400
Example #18
def deleta_professor(id_professor):
    try:
        professor = util.localiza(id_professor, 'PROFESSOR')
        deletado = util.remove(professor, 'PROFESSOR')
        return jsonify(deletado)
    except util.NotFoundError:
        return jsonify({'erro': 'professor nao encontrado'}), 400
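In Examples #16-18, util.localiza and util.remove act as persistence helpers: localiza fetches a record by id and type, raising util.NotFoundError when absent, and remove deletes the record and returns it for the JSON response. A sketch under those assumptions, with a hypothetical in-memory store:

DB = {'ALUNO': {}, 'DISCIPLINA': {}, 'PROFESSOR': {}}

class NotFoundError(Exception):
    pass

def localiza(record_id, kind):
    # Assumed lookup: return the stored record or raise NotFoundError.
    try:
        return DB[kind][record_id]
    except KeyError:
        raise NotFoundError(kind)

def remove(record, kind):
    # Assumed delete: drop the record from the store and return it.
    return DB[kind].pop(record['id'])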
Example #19
def cleanup():
    cmd = ''
    f = open(sourcefile, 'w')
    for (i, w) in enumerate(win_history):
        if w == "-1":
            w = "-"
        f.write("""select %s
at \"%s\#\" kill
focus
""" % (w, wins[i]))
    f.flush()
    f.close()
    scs.source(sourcefile)
    scs.focusminsize(focusminsize)
    sc.cleanup()
    remove(sourcefile)
Example #20
    def __scrollback_clean(self):
        '''clean up scrollback files: remove empty lines at the beginning and at the end of a file'''
        for f in glob.glob(os.path.join(self.basedir,self.savedir,'hardcopy.*')):
            try:
                ftmp=f+"_tmp"
                temp=open(ftmp,'w')
                thefile = open(f,'r')
                beginning=True
                for line in thefile:
                    if beginning: 
                        if cmp(line,'\n') == 0:
                            line = line.replace('\n','')
                        else:
                            beginning=False
                    temp.write(line)
                temp.close()
                thefile.close()

                temp = open( ftmp, 'r' )
                endmark=-1
                lockmark=False
                for i,line in enumerate(temp):
                    if cmp(line,'\n') == 0:
                        if not lockmark:
                            endmark=i
                            lockmark=True
                    else:
                        endmark=-1
                        lockmark=False
                temp.close()

                if endmark > 1:
                    thefile = open(f , 'w')
                    temp=open(ftmp,'r')
                    for i,line in enumerate(temp):
                        if i == endmark:
                            break
                        else:
                            thefile.write(line)
                    thefile.close()
                    temp.close()
                    util.remove(ftmp)
                else:
                    util.remove(f)
                    os.rename(ftmp,f)
            except:
                out('Unable to clean scrollback file: ' + f)
Example #21
def download(course):
    """
    Download grade book.
    :param course: A Coursera course object.
    :return: None.
    """
    path = course.get_info_folder() + '/temp.html'
    url = course.get_url() + '/admin/course_grade/export_grades'
    util.download(url, path, course.get_cookie_file())

    pattern = r'graded. <a href="(.*?)">'
    find = re.search(pattern, util.read_file(path), re.DOTALL)
    util.remove(path)

    if find:
        url = find.group(1)
        path = course.get_info_folder() + '/grades.csv'
        util.download(url, path, course.get_cookie_file())
Example #22
def process_lineage(stacking, band_name, clip_extents, tile_id, rename,
                    workdir):
    """Create the lineage file."""
    logger.info('     Start processing for band: %s', rename)

    lineage_filename = os.path.join(workdir, tile_id,
                                    tile_id + '_' + rename + '.tif')

    if os.path.exists(lineage_filename):
        logger.warning("Skip previously generated result %s", lineage_filename)
        return lineage_filename

    temp_names = list()
    for level, stack in reversed(list(enumerate(stacking, start=1))):
        temp_name = lineage_filename.replace('.tif',
                                             '_srcTemp%d' % level + '.tif')
        scene_name = util.ffind(workdir, stack['LANDSAT_PRODUCT_ID'],
                                '*' + band_name + '.tif')

        calc_cmd = (
            'gdal_calc.py -A {scene} --outfile {temp}'
            ' --calc=" {level} * (A > -101)" --type="Byte" --NoDataValue=0')
        util.execute_cmd(
            calc_cmd.format(level=level, temp=temp_name, scene=scene_name))
        temp_names.append(temp_name)

    warp_cmd = ('gdalwarp -te {extents} -dstnodata "0" -srcnodata "0"'
                ' -ot "Byte" -wt "Byte"'
                ' -co "compress=deflate" -co "zlevel=9"'
                ' -co "tiled=yes" -co "predictor=2" ').format(
                    extents=clip_extents)
    warp_cmd += ' '.join(temp_names)
    warp_cmd += ' ' + lineage_filename
    util.execute_cmd(warp_cmd)
    util.remove(*temp_names)

    logger.info('    End processing for %s as %s ', band_name,
                lineage_filename)
    if not os.path.exists(lineage_filename):
        logger.error('Processing failed to generate desired output: %s',
                     lineage_filename)
    return lineage_filename
Example #23
    def __save_layouts(self):
        (homelayout, homelayoutname) = self.get_layout_number()
        findir = sc.datadir
        if homelayout == -1:
            sys.stderr.write("No layouts to save.\n")
            return False
        path_layout = os.path.join(findir, "load_layout")
        oflayout = open(path_layout, "w")
        ex_lay = []
        for lay in sc.gen_layout_info(self, sc.dumpscreen_layout_info(self)):
            try:
                num = lay[0]
                title = lay[1]
            except:
                title = ""
            if self.excluded_layouts and (num in self.excluded_layouts or title in self.excluded_layouts):
                ex_lay.append(lay)
            else:
                sys.stdout.write("%s(%s); " % (num, title))
                oflayout.write(
                    """layout select %s
layout dump \"%s\"
dumpscreen layout \"%s\"
"""
                    % (num, os.path.join(findir, "layout_" + num), os.path.join(findir, "winlayout_" + num))
                )

        oflayout.write("layout select %s\n" % homelayout)
        oflayout.close()
        self.source(path_layout)
        util.remove(path_layout)
        linkify(findir, "layout_" + homelayout, "last_layout")
        if ex_lay:
            sys.stdout.write(
                """

Excluded layouts: %s"""
                % str(ex_lay)
            )

        out("")
        return True
Example #24
def process_segments(segments, output_path, conf):
    """Clips tiles from a list of contiguous scenes aka segment."""
    for i, segment in enumerate(segments):
        # Cleanup unneeded scene directories to save space
        # Results of this check should keep no more than 3 scenes
        # in work directory at a time
        if (i - 2) > -1:
            previous_scene = segments[i - 2]['LANDSAT_PRODUCT_ID']
            util.remove(previous_scene)

        scene_state = process_segment(segment, output_path, conf)
        logger.info("Scene %s is %s.", segment['LANDSAT_PRODUCT_ID'],
                    scene_state)
        logger.info('Segment loop: %d', i)

    # cleanup any remaining scene directories
    for segment in segments:
        previous_scene = segment['LANDSAT_PRODUCT_ID']
        if not conf.debug:
            util.remove(previous_scene)
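The (i - 2) > -1 check implements a sliding window: while segment i is being processed, the scene directory from two iterations back is deleted, so at most three scene directories exist at once. With five segments, the interleaving looks like this (illustrative trace, not program output):

i=0: process segment 0
i=1: process segment 1
i=2: remove scene 0, process segment 2
i=3: remove scene 1, process segment 3
i=4: remove scene 2, process segment 4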
Example #25
File: video.py Project: kq2/Ricin
def download_subtitles(course, item):
    """
    Download all subtitles of this video.
    """
    if item['source_video']:
        url = '{}/admin/api/lectures/{}/subtitles'
        url = url.format(course.get_url(), item['item_id'])

        path = course.get_folder() + '/video/subtitles/temp.json'
        util.download(url, path, course.get_cookie_file())

        subtitles = util.read_json(path)
        util.remove(path)

        for subtitle in subtitles:
            url = subtitle['srt_url']
            if url:
                path = '{}/video/subtitles/{}.{}.srt'
                path = path.format(course.get_folder(), item['item_id'],
                                   subtitle['language'])
                util.download(url, path, course.get_cookie_file())
Example #26
File: video.py Project: kq2/Ricin
def download_compressed_video(course, item):
    """
    Download compressed video.
    """
    if item['source_video']:
        url = '{}/lecture/view?lecture_id={}&preview=1'
        url = url.format(course.get_url(), item['item_id'])

        path = '{}/video/compressed_videos/{}.html'
        path = path.format(course.get_folder(), item['item_id'])

        util.download(url, path, course.get_cookie_file())

        pattern = r'type="video/mp4" src="(.*?)"'
        url = re.search(pattern, util.read_file(path), re.DOTALL).group(1)

        util.remove(path)
        path = '{}/video/compressed_videos/{}.mp4'
        path = path.format(course.get_folder(), item['item_id'])

        util.download(url, path, course.get_cookie_file(), resume=True)
Example #27
    def __save_layouts(self):
        (homelayout, homelayoutname) = self.get_layout_number()
        findir = sc.datadir
        if homelayout == -1:
            sys.stderr.write("No layouts to save.\n")
            return False
        path_layout = os.path.join(findir, "load_layout")
        oflayout = open(path_layout, "w")
        ex_lay = []
        for lay in sc.gen_layout_info(self, sc.dumpscreen_layout_info(self)):
            try:
                num = lay[0]
                title = lay[1]
            except:
                title = ""
            if self.excluded_layouts and (num in self.excluded_layouts or
                    title in self.excluded_layouts):
                ex_lay.append(lay)
            else:
                sys.stdout.write("%s(%s); " % (num, title))
                oflayout.write('''layout select %s
layout dump \"%s\"
dumpscreen layout \"%s\"
''' %
                               (num, os.path.join(findir, "layout_" +
                               num), os.path.join(findir, "winlayout_" +
                               num)))

        oflayout.write('layout select %s\n' % homelayout)
        oflayout.close()
        self.source(path_layout)
        util.remove(path_layout)
        linkify(findir, "layout_" + homelayout, "last_layout")
        if ex_lay:
            sys.stdout.write("""

Excluded layouts: %s""" % str(ex_lay))

        out("")
        return True
Example #28
File: peer.py Project: kq2/Ricin
def _download_assesment(course, url, folder):
    url = "{}/data/export/{}".format(course.get_url(), url)
    temp = "temp.html"
    util.download(url, temp, course.get_cookie_file())

    page = util.read_file(temp)
    util.remove(temp)

    pattern = r"<tbody>.*?</tbody>"
    table = re.findall(pattern, page, re.DOTALL)[-1]

    pattern = r'<td colspan="2">(.*?)</td>.*?<a href="(.*?/export/(.*?)\?.*?)">Download</a>'
    for tr_match in re.finditer(r"<tr>.*?</tr>", table, re.DOTALL):
        for match in re.finditer(pattern, tr_match.group(0), re.DOTALL):
            name = match.group(1).replace("&quot;", "").replace(":", "")
            name = name.replace("&lt;em&gt;", "")
            name = name.replace("&lt;/em&gt;", "")
            url = match.group(2)
            file_name = util.unquote(match.group(3))

            path = u"{}/peer_assessment/{}/{} {}".format(course.get_folder(), folder, name, file_name)
            util.download(url, path, course.get_cookie_file(), resume=True)
Example #29
def prepare_windows(scs):
    global focusminsize
    regions = None
    regions = sc.get_regions(sc.dumpscreen_layout(scs.pid))
    sc.cleanup()
    focusminsize = "%s %s" % (regions.focusminsize_x, regions.focusminsize_x)
    regions_c = len(regions.regions)
    focus_offset = regions.focus_offset
    scs.focusminsize('0 0')
    this_win_history = []
    cmd = ''
    f = open(sourcefile, 'w')
    for i in range(0, regions_c):
        f.write("""screen -t scs-regions-helper %s %s %s %d
focus
""" %
                (subprogram, subprogram_args, inputfile, i))
    f.flush()
    f.close()
    scs.source(sourcefile, "screen-session regions")
    remove(sourcefile)

    regions_n = []
    regions_n = sc.get_regions(sc.dumpscreen_layout(scs.pid))
    sc.cleanup()

    for r in (regions.regions)[focus_offset:]:
        this_win_history.append(r[0])
    for r in (regions.regions)[:focus_offset]:
        this_win_history.append(r[0])

    new_windows = []
    for r in (regions_n.regions)[focus_offset:]:
        new_windows.append(r[0])
    for r in (regions_n.regions)[:focus_offset]:
        new_windows.append(r[0])

    return (this_win_history, new_windows, regions_c)
Example #30
    def __restore_mru(self):
        if self.enable_layout and not self.mru:
            pass
        else:
            try:
                if self.mru:
                    sys.stdout.write("\nRestoring MRU windows order:")
                else:
                    sys.stdout.write("\nSelecting last window:")

                mru_w = []
                ifmru = open(os.path.join(self.basedir, self.savedir,
                             "mru"), 'r')
                for line in ifmru:
                    n = line.strip()
                    try:
                        nw = (self.__wins_trans)[n]
                        mru_w.append('select ' + nw + '\n')
                        sys.stdout.write(' %s' % nw)
                        if not self.mru:
                            break
                    except:
                        if self.enable_layout:
                            mru_w.append('select -\n')
                        else:
                            pass
                ifmru.close()
                mru_w.reverse()
                path_mru_tmp = os.path.join(self.basedir, self.savedir,
                        "mru_tmp")
                ofmru = open(path_mru_tmp, "w")
                ofmru.writelines(mru_w)
                ofmru.close()
                self.source(path_mru_tmp)
                util.remove(path_mru_tmp)
            except:
                sys.stderr.write(' Failed to load MRU.')
            out("")
Example #31
def ub_lb(x_star, Vy, cons, scale=1):
    m, d = Vy.shape
    c = util.read_constraint(cons)
    
    # determine direction
    z = direction(d)
    
    # determine the most promising area
    Vy = util.remove(Vy, util.ismember(x_star, Vy))
    t1 = util.mat_sub(np.matlib.repmat(x_star, m-1, 1), Vy)
    t2 = util.mat_add(np.matlib.repmat(x_star, m-1, 1), Vy) * 0.5
    
    # initialize upper and lower bounds
    ub = [0] * d
    lb = [0] * d
    for i in range(d):
        ub[i] = x_star[i] + z[i] * scale
        lb[i] = x_star[i] - z[i] * scale

    # determine upper bound
    check_ub1 = np.dot(t1.T, util.mat_sub(np.matlib.repmat(ub, m-1, 1), t2))
    check_ub2 = True
    while (np.sum(util.mat_abs(check_ub1)) >= 1e-6) and check_ub2 :
        for i in range(d):
            if ub[i] > c[i][1]:
                check_ub2 = False
                break
            else:
                ub[i] += z[i] * scale
        check_ub1 = np.dot(t1.T, util.mat_sub(np.matlib.repmat(ub, m-1, 1), t2))
    
    for j in range(d):
        ub[j] -= z[j] * scale

    # determine lower bound
    check_lb1 = np.dot(t1.T, util.mat_sub(np.matlib.repmat(lb, m-1, 1), t2))
    check_lb2 = True
    while (np.sum(util.mat_abs(check_lb1)) >= 1e-6) and check_lb2 :
        for i in range(d):
            if lb[i] < c[i][0]:
                check_lb2 = False
                break
            else:
                lb[i] -= z[i] * scale
        check_lb1 = np.dot(t1.T, util.mat_sub(np.matlib.repmat(lb, m-1, 1), t2))
        
    for j in range(d):
        lb[j] += z[j] * scale
    
    return (ub, lb, z)
Example #32
File: video.py Project: kq2/Ricin
def _download_new_quizzes(course, item, path):
    """
    Download new version in-video quizzes.
    """
    # Step 1: download an HTML page that contains the quiz ID.
    url = '{}/lecture/view?quiz_v2_admin=1&lecture_id={}'
    url = url.format(course.get_url(), item['parent_id'])
    util.download(url, path, course.get_cookie_file())

    pattern = r'v2-classId="(.*?)".*?v2-id="(.*?)".*?v2-lecture-id="(.*?)"'
    find = re.search(pattern, util.read_file(path), re.DOTALL)
    class_id, v2_id, lecture_id = find.group(1, 2, 3)

    # if no quiz in this video, delete the file
    if not v2_id:
        util.remove(path)
        return

    # Step 2: download a JSON file that contains the question IDs.
    class_url = 'https://class.coursera.org/api/assess/v1/inVideo/class/' + class_id
    url = '{}/lecture/{}/{}'.format(class_url, lecture_id, v2_id)
    util.download(url, path, course.get_cookie_file())

    # Step 3: download each question.
    quiz = util.read_json(path)
    questions = quiz['assessment']['definition']['questions']
    for question_id, question in questions.items():
        url = '{}/questions/{}'.format(class_url, question_id)
        util.download(url, path, course.get_cookie_file())
        question_json = util.read_json(path)

        # add question content to quiz
        question['metadata'] = question_json['metadata']
        question['data'] = question_json['question']

    # write the whole quiz to file
    util.write_json(path, quiz)
Example #33
    def __restore_mru(self):
        if self.enable_layout and not self.mru:
            pass
        else:
            try:
                if self.mru:
                    sys.stdout.write("\nRestoring MRU windows order:")
                else:
                    sys.stdout.write("\nSelecting last window:")

                mru_w = []
                ifmru = open(os.path.join(self.basedir, self.savedir, "mru"), "r")
                for line in ifmru:
                    n = line.strip()
                    try:
                        nw = (self.__wins_trans)[n]
                        mru_w.append("select " + nw + "\n")
                        sys.stdout.write(" %s" % nw)
                        if not self.mru:
                            break
                    except:
                        if self.enable_layout:
                            mru_w.append("select -\n")
                        else:
                            pass
                ifmru.close()
                mru_w.reverse()
                path_mru_tmp = os.path.join(self.basedir, self.savedir, "mru_tmp")
                ofmru = open(path_mru_tmp, "w")
                ofmru.writelines(mru_w)
                ofmru.close()
                self.source(path_mru_tmp)
                util.remove(path_mru_tmp)
            except:
                sys.stderr.write(" Failed to load MRU.")
            out("")
Example #34
	def delchild(self, child):
		if child not in self.children:
			raise Error, 'delchild: child not in list'
		remove(child, self.children)
		if child in self.mouse_interest:
			remove(child, self.mouse_interest)
		if child in self.timer_interest:
			remove(child, self.timer_interest)
		if child == self.mouse_focus:
			self.mouse_focus = 0
Example #35
def environment(roles=None, host=None):
    if roles is None:
        roles = fab.env.roles
    if host is None:
        host = fab.env.host
    if not host:
        fab.warn('no host')
    role_configs = [cfg['roles'][role] for role in roles]
    global_vars = remove(cfg, ['roles', 'hosts'])
    ret = reduce(
        merge,
        [global_vars, cfg['hosts'][host] if host else [], cfg['roles']['all']
         ] + role_configs)
    if 'document_root' not in ret:
        url = urlparse(ret['ftp']['url'])
        ret['document_root'] = url.path
    return ret
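Here remove(cfg, ['roles', 'hosts']) is a different variant again: it evidently returns a copy of the config mapping with the listed keys dropped, since cfg itself is reused immediately afterwards. A plausible sketch under that assumption:

def remove(mapping, keys):
    # Assumed behavior: shallow copy of mapping without the given keys.
    return {k: v for k, v in mapping.items() if k not in keys}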
Example #36
class Test_Set__WCAG_1_AA(Test_Set):

    _name = 'WCAG 1.0 AA'
    _description = 'Testing for conformance with Web Accessibility Guidelines 1.0, level Double-A.'
    _tests = (
        util.remove(Test_Set__WCAG_1_A._tests, Test__WCAG_1__8_1_Important) + (
            Test__WCAG_1__2_2_Images,
            Test__WCAG_1__3_1,
            Test__WCAG_1__3_2,
            Test__WCAG_1__3_3,
            Test__WCAG_1__3_4,
            Test__WCAG_1__3_5,
            Test__WCAG_1__3_6,
            Test__WCAG_1__3_7,
            Test__WCAG_1__5_3,
            Test__WCAG_1__5_4,
            Test__WCAG_1__6_4,
            Test__WCAG_1__6_5,
            Test__WCAG_1__7_2,
            Test__WCAG_1__7_3,
            Test__WCAG_1__7_4,
            Test__WCAG_1__7_5,
            Test__WCAG_1__8_1,
            Test__WCAG_1__9_2,
            Test__WCAG_1__9_3,
            Test__WCAG_1__10_1,
            Test__WCAG_1__10_2,
            Test__WCAG_1__11_1,
            Test__WCAG_1__11_2,
            Test__WCAG_1__12_2,
            Test__WCAG_1__12_3,
            Test__WCAG_1__12_4,
            Test__WCAG_1__13_1,
            Test__WCAG_1__13_2,
            Test__WCAG_1__13_3,
            Test__WCAG_1__13_4,
        ))
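In this test-suite definition, util.remove(tests, item) must return a new tuple with one test class removed, because its result is concatenated with another tuple. A plausible sketch under that assumption:

def remove(items, item):
    # Assumed behavior: a new tuple equal to items with item filtered out.
    return tuple(x for x in items if x is not item)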
Example #37
def nest_layout(session, src_layout, dst_layout):
    src_dumpfile = os.path.join(tmpdir_source, 'nest_layout-dump-%d' % os.getpid())

    if not os.path.exists(tmpdir_source):
        os.makedirs(tmpdir_source)

    scs = ScreenSaver(session)

    print('layouts src: %s dst: %s' % (src_layout, dst_layout))

    regions_file_dst = regions_file = sc.dumpscreen_layout(scs.pid)
    regions_dst = sc.get_regions(regions_file)

    dst_focusminsize = "%s %s" % (regions_dst.focusminsize_x, regions_dst.focusminsize_y)
    dst_rsize = (int(regions_dst.regions[regions_dst.focus_offset][1]),
                int(regions_dst.regions[regions_dst.focus_offset][2]))
    dst_term_size = (int(regions_dst.term_size_x),
                    int(regions_dst.term_size_y))
    scs.layout('select %s' % src_layout, False)

    scs.layout('dump %s' % src_dumpfile, False)

    regions_file_src = sc.dumpscreen_layout(scs.pid)
    regions_src = sc.get_regions(regions_file_src)

    src_term_size = (int(regions_src.term_size_x), int(regions_src.term_size_y))

    print('dst_rsize: %s' % str(dst_rsize))
    print('src_term_size: %s' % str(src_term_size))

    scs.layout('select %s' % dst_layout, False)
    
    regions_new = sc.Regions()
    regions_new.focus_offset =  regions_dst.focus_offset + regions_src.focus_offset
    regions_new.term_size_x = regions_dst.term_size_x
    regions_new.term_size_y = regions_dst.term_size_y
    regions_new.focusminsize_x = regions_dst.focusminsize_x
    regions_new.focusminsize_y = regions_dst.focusminsize_y
    regions_new.regions = regions_dst.regions[:regions_dst.focus_offset]

    for (window, sizex, sizey) in regions_src.regions:
        print('SRC REGION' + str((window,sizex,sizey)))
        x = (int(sizex) * dst_rsize[0]) / src_term_size[0]
        y = (int(sizey) * dst_rsize[1]) / src_term_size[1]
        print('%s * %d / %d = %d' % (sizex, dst_rsize[0], src_term_size[0], x))
        print('%s * %d / %d = %d' % (sizey, dst_rsize[1], src_term_size[1], y))
        regions_new.regions.append((window, str(x), str(y)))

    regions_new.regions = regions_new.regions + regions_dst.regions[regions_dst.focus_offset+1:]
    
    print('destination regions: '+ str(regions_dst.regions))
    print('source regions: ' + str(regions_src.regions))
    print('new regions: ' + str(regions_new.regions))

    sc.layout_begin(session)
    sc.layout_load_dump(open(src_dumpfile, 'r'))
    sc.layout_load_regions(regions_new, None, dst_term_size[0], dst_term_size[1])
    sc.layout_end()

    remove(src_dumpfile)
    remove(regions_file_dst)
    remove(regions_file_src)
    sc.cleanup()
Example #38
    def main(self):

        if self.N_proc == '-1':
            self.N_proc = N_CPU

        # check directory existence
        check_dir(self.templatePath, self.force)

        ## check consistency of b-shells and spatial resolution
        ref_bvals_file = pjoin(self.templatePath, 'ref_bshell_bvalues.txt')
        ref_res_file = pjoin(self.templatePath, 'ref_res_file.npy')
        if self.ref_csv:
            if isfile(ref_bvals_file) and isfile(ref_res_file):
                remove(ref_bvals_file)
                remove(ref_res_file)

            consistencyCheck(self.ref_csv, ref_bvals_file, ref_res_file)

        if self.target_csv:
            consistencyCheck(self.target_csv, ref_bvals_file, ref_res_file)

        ## separate b-shells
        if self.ref_csv:
            refListOutPrefix = separateShellsWrapper(self.ref_csv,
                                                     ref_bvals_file,
                                                     self.N_proc)
        if self.target_csv:
            tarListOutPrefix = separateShellsWrapper(self.target_csv,
                                                     ref_bvals_file,
                                                     self.N_proc)

        ## define variables for template creation and data harmonization

        # variables common to all ref_bvals
        pipeline_vars = [
            '--tar_name',
            self.target,
            '--nshm',
            self.N_shm,
            '--nproc',
            self.N_proc,
            '--template',
            self.templatePath,
        ]

        if self.reference:
            pipeline_vars.append(f'--ref_name {self.reference}')
        if self.N_zero:
            pipeline_vars.append(f'--nzero {self.N_zero}')
        if self.bvalMap:
            pipeline_vars.append(f'--bvalMap {self.bvalMap}')
        if self.resample:
            pipeline_vars.append(f'--resample {self.resample}')
        if self.denoise:
            pipeline_vars.append('--denoise')
        if self.travelHeads:
            pipeline_vars.append('--travelHeads')
        if self.force:
            pipeline_vars.append('--force')
        if self.debug:
            pipeline_vars.append('--debug')
        if self.verbose:
            pipeline_vars.append('--verbose')

        # the b-shell bvalues are sorted in descending order because we want to perform registration with highest bval
        ref_bvals = read_bvals(ref_bvals_file)[::-1]
        for bval in ref_bvals[:-1]:  # pass the last bval which is 0.

            if self.create and not self.process:
                print('## template creation ##')

                check_call((' ').join([
                    pjoin(SCRIPTDIR, 'harmonization.py'), '--tar_list',
                    tarListOutPrefix + f'_b{int(bval)}.csv', '--bshell_b',
                    str(int(bval)), '--ref_list', refListOutPrefix +
                    f'_b{int(bval)}.csv', '--create'
                ] + pipeline_vars),
                           shell=True)

            elif not self.create and self.process:
                print('## data harmonization ##')

                check_call((' ').join([
                    pjoin(SCRIPTDIR, 'harmonization.py'), '--tar_list',
                    tarListOutPrefix + f'_b{int(bval)}.csv',
                    f'--ref_list {refListOutPrefix}_b{int(bval)}.csv' if self.
                    ref_csv else '', '--bshell_b',
                    str(int(bval)), '--process'
                ] + pipeline_vars),
                           shell=True)

            elif self.create and self.process:
                check_call((' ').join([
                    pjoin(SCRIPTDIR, 'harmonization.py'), '--tar_list',
                    tarListOutPrefix + f'_b{int(bval)}.csv', '--bshell_b',
                    str(int(bval)), '--ref_list', refListOutPrefix +
                    f'_b{int(bval)}.csv', '--create', '--process'
                ] + pipeline_vars),
                           shell=True)

            if '--force' in pipeline_vars:
                pipeline_vars.remove('--force')

        ## join harmonized data
        if self.process:
            joinAllBshells(self.target_csv, ref_bvals_file, 'harmonized_',
                           self.N_proc)

            if self.debug and self.ref_csv:
                joinAllBshells(self.ref_csv, ref_bvals_file, 'reconstructed_',
                               self.N_proc)
Example #39
def handler(signum, frame):
    global handler_lock
    if handler_lock:
        return
    else:
        handler_lock = True
    global win_history
    bSelect = False
    mode = -1
    number = 0
    try:
        f = open(inputfile, "r")
        ch = f.readline()
        f.close()
        remove(inputfile)
        try:
            number = int(ch[1:])
        except:
            number = 0

        if ch[0] == 's':
            mode = 1
        elif ch[0] == 'S':
            mode = 1
            bSelect = True
        elif ch[0] == " " or ch[0] == "'" or ch[0] == 'g' or ch[0] == 'G':
            mode = 0
            bSelect = True
            if ch[0] == 'G':
                number = -1 * number
        elif ch[0] == "l":
            mode = 2
            rnumber = number
        elif ch[0] == "L":
            mode = 2
            rnumber = number
            number = -1 * number
            bSelect = True
        elif ch[0] == "r":
            rnumber = -1 * number
            mode = 2
        elif ch[0] == "R":
            rnumber = -1 * number
            mode = 2
            bSelect = True
        else:
            mode = 0

        if number != 0 and mode == 1:
            tmp = win_history[0]
            win_history[0] = win_history[number]
            win_history[number] = tmp
        elif mode == 2:
            win_history = rotate_list(win_history, rnumber)
    except IOError:
        sys.stderr.write('No input file found.\n')

    cleanup()

    if number != 0 and bSelect:

        # this will work properly up to 62 regions (MAXSTR-4)/8

        command = SCREEN + ' -S "%s" -X eval' % session
        if number < 0:
            number = abs(number)
            cfocus = 'focus prev'
        else:
            cfocus = 'focus'
        for i in range(0, number):
            command += ' "%s"' % cfocus
        os.system(command)
    sys.exit(0)
Example #40
def train_conv_net(datasets,
                   U,
                   idx_word_map,
                   img_w=300,
                   filter_hs=[3, 4, 5],
                   hidden_units=[100, 2],
                   dropout_rate=[0.5],
                   shuffle_batch=True,
                   n_epochs=25,
                   batch_size=50,
                   lr_decay=0.95,
                   conv_non_linear="relu",
                   activations=[Iden],
                   sqr_norm_lim=9,
                   non_static=True,
                   sen_dropout_rate=[0.0],
                   whether_train_sen=True):

    rng = np.random.RandomState(3435)
    img_h = datasets[0][0][0].shape[0] - 1
    filter_w = img_w
    feature_maps = hidden_units[0]
    filter_shapes = []
    pool_sizes = []
    for filter_h in filter_hs:
        filter_shapes.append((feature_maps, 1, filter_h, filter_w))
        pool_sizes.append((img_h - filter_h + 1, img_w - filter_w + 1))
    parameters = [("image shape", img_h, img_w),
                  ("filter shape", filter_shapes),
                  ("hidden_units", hidden_units), ("dropout", dropout_rate),
                  ("batch_size", batch_size), ("non_static", non_static),
                  ("learn_decay", lr_decay),
                  ("conv_non_linear", conv_non_linear),
                  ("non_static", non_static), ("sqr_norm_lim", sqr_norm_lim),
                  ("shuffle_batch", shuffle_batch),
                  ('sentence dropout rate', sen_dropout_rate)]
    print(parameters)

    #define model architecture
    index = T.lscalar()
    x = T.tensor3('x')
    y = T.ivector('y')
    sen_x = T.matrix('sen_x')
    mark = T.matrix('mark')
    sen_y = T.ivector('sen_y')
    Words = theano.shared(value=U, name="Words")
    zero_vec_tensor = T.vector()
    zero_vec = np.zeros(img_w)
    set_zero = theano.function([zero_vec_tensor],
                               updates=[
                                   (Words,
                                    T.set_subtensor(Words[0, :],
                                                    zero_vec_tensor))
                               ],
                               allow_input_downcast=True)
    layer0_input = Words[T.cast(x.flatten(), dtype="int32")].reshape(
        (x.shape[0] * x.shape[1], 1, x.shape[2], Words.shape[1]))
    sen_layer0_input = Words[T.cast(sen_x.flatten(), dtype='int32')].reshape(
        (sen_x.shape[0], 1, sen_x.shape[1], Words.shape[1]))
    conv_layers = []
    layer1_inputs = []
    Doc_length = datasets[0][0].shape[0]
    sen_layer1_inputs = []
    for i in xrange(len(filter_hs)):
        filter_shape = filter_shapes[i]
        pool_size = pool_sizes[i]
        conv_layer = LeNetConvPoolLayer(rng,
                                        input=layer0_input,
                                        image_shape=(None, 1, img_h, img_w),
                                        filter_shape=filter_shape,
                                        poolsize=pool_size,
                                        non_linear=conv_non_linear)
        layer1_input = conv_layer.output.flatten(2)
        conv_layers.append(conv_layer)
        layer1_inputs.append(layer1_input)
        sen_layer1_input = conv_layer.predict(sen_layer0_input,
                                              None).flatten(2)
        sen_layer1_inputs.append(sen_layer1_input)

    layer1_input = T.concatenate(layer1_inputs, 1)
    sen_layer1_input = T.concatenate(sen_layer1_inputs, 1)
    hidden_units[0] = feature_maps * len(filter_hs)
    sen_hidden_units = [feature_maps * len(filter_hs), 3]
    shaped_mark = T.flatten(mark)
    sen_classifier1 = MLPDropout(rng,
                                 input=sen_layer1_input,
                                 layer_sizes=sen_hidden_units,
                                 activations=activations,
                                 dropout_rates=sen_dropout_rate)
    sen_cost = sen_classifier1.dropout_negative_log_likelihood(sen_y)
    sen_pos_prob = T.max(
        sen_classifier1.predict_p(layer1_input)[:, np.array([0, 2])], axis=1)
    prev_layer1_output, updates = theano.scan(
        fn=lambda i, x: x[i * Doc_length:i * Doc_length + Doc_length],
        sequences=[T.arange(batch_size)],
        non_sequences=layer1_input * (sen_pos_prob.dimshuffle(0, 'x')) *
        (shaped_mark.dimshuffle(0, 'x')))
    layer1_output = T.sum(prev_layer1_output, axis=1)
    classifier = MLPDropout(rng,
                            input=layer1_output,
                            layer_sizes=hidden_units,
                            activations=activations,
                            dropout_rates=dropout_rate)

    #define parameters of the model and update functions using adadelta
    params = classifier.params
    for conv_layer in conv_layers:
        params += conv_layer.params
    if non_static:
        params += [Words]

    #add sentence level parameters
    sen_params = sen_classifier1.params
    for conv_layer in conv_layers:
        sen_params += conv_layer.params
    if non_static:
        sen_params += [Words]

    cost = classifier.negative_log_likelihood(y)
    dropout_cost = classifier.dropout_negative_log_likelihood(y)
    grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6,
                                        sqr_norm_lim)
    sen_grad_updates = sgd_updates_adadelta(sen_params, sen_cost, lr_decay,
                                            1e-6, sqr_norm_lim)

    np.random.seed(3435)
    train_mask = np.zeros((datasets[0].shape[0], datasets[0].shape[1]),
                          dtype='float32')  # doc length * number of documents
    test_mask = np.zeros((datasets[2].shape[0], datasets[2].shape[1]),
                         dtype='float32')

    #set the mask
    for i in range(datasets[0].shape[0]):
        for j in range(datasets[0][i].shape[0]):
            if np.count_nonzero(datasets[0][i][j]) != 0:
                train_mask[i][j] = 1.0

    for i in range(datasets[2].shape[0]):
        for j in range(datasets[2][i].shape[0]):
            if np.count_nonzero(datasets[2][i][j]) != 0:
                test_mask[i][j] = 1.0

    if datasets[0].shape[0] % batch_size > 0:
        extra_data_num = batch_size - datasets[0].shape[0] % batch_size
        permuted_index = np.random.permutation(range(datasets[0].shape[0]))
        permuted_index = np.append(permuted_index,
                                   permuted_index[:extra_data_num])
        new_data = datasets[0][permuted_index]
    else:
        permuted_index = np.random.permutation(range(datasets[0].shape[0]))
        new_data = datasets[0][permuted_index]

    n_batches = new_data.shape[0] / batch_size
    n_train_batches = int(np.round(n_batches * 0.9))

    #divide train set into train/val sets
    train_set_y = datasets[1][permuted_index]
    test_set_x, test_set_y = shared_dataset(
        (datasets[2][:, :, :-1], datasets[3]))
    test_set_mark = theano.shared(test_mask.astype(theano.config.floatX))

    train_mask = train_mask[permuted_index]
    train_set_mark = train_mask[:n_train_batches * batch_size]
    train_set_mark = theano.shared(train_set_mark.astype(theano.config.floatX))

    train_set_with_sen_label = new_data[:n_train_batches * batch_size]
    val_set_with_sen_label = new_data[n_train_batches * batch_size:]

    train_set = new_data[:n_train_batches * batch_size, :, :-1]
    train_set_label = train_set_y[:n_train_batches * batch_size]

    val_set = new_data[n_train_batches * batch_size:, :, :-1]
    val_set_label = train_set_y[n_train_batches * batch_size:]
    val_set_mark = train_mask[n_train_batches * batch_size:]
    val_set_mark = theano.shared(val_set_mark.astype(theano.config.floatX))

    train_set_x, train_set_y = shared_dataset((train_set, train_set_label))

    val_set_x, val_set_y = shared_dataset((val_set, val_set_label))

    n_val_batches = n_batches - n_train_batches
    val_model = theano.function(
        [index],
        classifier.errors(y),
        givens={
            x: val_set_x[index * batch_size:(index + 1) * batch_size],
            y: val_set_y[index * batch_size:(index + 1) * batch_size],
            mark: val_set_mark[index * batch_size:(index + 1) * batch_size]
        },
        allow_input_downcast=True)

    #compile theano functions to get train/val/test errors
    test_model = theano.function(
        [index],
        classifier.errors(y),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            mark: train_set_mark[index * batch_size:(index + 1) * batch_size]
        },
        allow_input_downcast=True)

    train_model = theano.function(
        [index],
        cost,
        updates=grad_updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            mark: train_set_mark[index * batch_size:(index + 1) * batch_size]
        },
        allow_input_downcast=True)

    test_pred_layers = []
    test_size = datasets[2].shape[0]
    test_batch_size = 1
    n_test_batches = int(math.ceil(test_size / float(test_batch_size)))
    test_layer0_input = Words[T.cast(x.flatten(), dtype="int32")].reshape(
        (x.shape[0] * x.shape[1], 1, x.shape[2], Words.shape[1]))
    for conv_layer in conv_layers:
        test_layer0_output = conv_layer.predict(test_layer0_input,
                                                test_batch_size * Doc_length)
        test_pred_layers.append(test_layer0_output.flatten(2))
    test_layer1_input = T.concatenate(test_pred_layers, 1)
    test_sen_prob = T.max(
        sen_classifier1.predict_p(test_layer1_input)[:, np.array([0, 2])],
        axis=1)
    test_sen_prob_to_sen, updates = theano.scan(
        fn=lambda i, x: x[i * Doc_length:i * Doc_length + Doc_length],
        sequences=[T.arange(test_batch_size)],
        non_sequences=test_sen_prob)
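    # the scan regroups the flat per-sentence probabilities into one row of
    # Doc_length sentence scores per document in the test batch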

    sorted_index = T.argsort(test_sen_prob_to_sen * shaped_mark, axis=-1)[:, -5:]
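    # argsort is ascending, so the last five columns index the five
    # highest-probability sentences per document (the candidate rationales)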
    sorted_sentence, updates = theano.scan(
        fn=lambda i, y: y[i, sorted_index[i], :],
        sequences=[T.arange(sorted_index.shape[0])],
        non_sequences=x)
    sorted_prob, updates = theano.scan(
        fn=lambda i, z: z[i, sorted_index[i]],
        sequences=[T.arange(sorted_index.shape[0])],
        non_sequences=test_sen_prob_to_sen)

    sorted_sentence_value = theano.function(
        [index],
        sorted_sentence,
        allow_input_downcast=True,
        givens={
            x:
            test_set_x[index * test_batch_size:(index + 1) * test_batch_size],
            mark:
            test_set_mark[index * test_batch_size:(index + 1) *
                          test_batch_size]
        })

    sorted_prob_val = theano.function(
        [index],
        sorted_prob,
        allow_input_downcast=True,
        givens={
            x:
            test_set_x[index * test_batch_size:(index + 1) * test_batch_size],
            mark:
            test_set_mark[index * test_batch_size:(index + 1) *
                          test_batch_size]
        })

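    # document-level prediction: each sentence's feature vector is weighted
    # by its rationale probability and mask, then summed into a single
    # document representation before classification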
    test_layer1_output, updates = theano.scan(
        fn=lambda i, x: x[i * Doc_length:i * Doc_length + Doc_length],
        sequences=[T.arange(test_batch_size)],
        non_sequences=test_layer1_input * (test_sen_prob.dimshuffle(0, 'x')) *
        (shaped_mark.dimshuffle(0, 'x')))
    test_layer1_output = T.sum(test_layer1_output, axis=1)
    test_y_pred = classifier.predict(test_layer1_output)
    test_error = T.mean(T.neq(test_y_pred, y))

    test_model_all = theano.function(
        [index],
        test_error,
        allow_input_downcast=True,
        givens={
            x:
            test_set_x[index * test_batch_size:(index + 1) * test_batch_size],
            y:
            test_set_y[index * test_batch_size:(index + 1) * test_batch_size],
            mark:
            test_set_mark[index * test_batch_size:(index + 1) *
                          test_batch_size],
        })

    print('... training')
    epoch = 0
    best_val_perf = 0
    val_perf = 0
    test_perf = 0
    cost_epoch = 0
    sen_batch_size = 50
    best_sen_param = []
    for p in sen_params:
        best_sen_param.append(theano.shared(p.get_value()))
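    # snapshot buffers holding the best sentence-level parameters seen so far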

    # first, pre-train on sentences
    best_sen_val = 0.0
    if whether_train_sen:
        print('pre-train on sentences')
        while epoch < 20:
            sen_costs = []
            train_sen = train_set_with_sen_label
            train_sentences = util.doc_to_sen(train_sen)
            train_sentences = util.remove(train_sentences)
            train_sentences = util.downsample_three(train_sentences)
            print("positive sentences after sampling: " +
                  str(np.sum(train_sentences[:, -1] == 2)))
            print("negative sentences after sampling: " +
                  str(np.sum(train_sentences[:, -1] == 0)))
            print("neutral sentences after sampling: " +
                  str(np.sum(train_sentences[:, -1] == 1)))
            train_sentences = np.random.permutation(train_sentences)
            if train_sentences.shape[0] % sen_batch_size != 0:
                extra_data_num = sen_batch_size - train_sentences.shape[
                    0] % sen_batch_size
                extra_index = np.random.permutation(
                    range(train_sentences.shape[0]))[:extra_data_num]
                train_sentences = np.vstack(
                    (train_sentences, train_sentences[extra_index]))
            train_sen_x, train_sen_y = shared_dataset(
                (train_sentences[:, :-1], train_sentences[:, -1]))
            train_sen_model = theano.function(
                [index],
                sen_cost,
                updates=sen_grad_updates,
                givens={
                    sen_x:
                    train_sen_x[index * sen_batch_size:(index + 1) *
                                sen_batch_size],
                    sen_y:
                    train_sen_y[index * sen_batch_size:(index + 1) *
                                sen_batch_size]
                })

            n_train_sen_batches = train_sentences.shape[0] // sen_batch_size
            for minibatch_index_1 in np.random.permutation(
                    range(n_train_sen_batches)):
                cur_sen_cost = train_sen_model(minibatch_index_1)
                sen_costs.append(cur_sen_cost)
                set_zero(zero_vec)

            print("training sentence cost: " +
                  str(sum(sen_costs) / len(sen_costs)))
            val_sen = val_set_with_sen_label
            val_sentences = util.doc_to_sen(val_sen)
            val_sentences = util.remove(val_sentences)
            print("positive sentences in the validation set: " +
                  str(np.sum(val_sentences[:, -1] == 2)))
            print("negative sentences in the validation set: " +
                  str(np.sum(val_sentences[:, -1] == 0)))
            print("neutral sentences in the validation set: " +
                  str(np.sum(val_sentences[:, -1] == 1)))
            val_sen_x, val_sen_y = shared_dataset(
                (val_sentences[:, :-1], val_sentences[:, -1]))
            val_sen_model = theano.function([],
                                            sen_classifier1.errors(sen_y),
                                            givens={
                                                sen_x: val_sen_x,
                                                sen_y: val_sen_y
                                            })
            val_accuracy = 1 - val_sen_model()
            print("validation sentence accuracy: " + str(val_accuracy))
            if val_accuracy > best_sen_val:
                best_sen_val = val_accuracy
                for i, p in enumerate(best_sen_param):
                    p.set_value(sen_params[i].get_value())
            epoch = epoch + 1
        for i, sp in enumerate(sen_params):
            sp.set_value(best_sen_param[i].get_value())

    # train on documents
    epoch = 0
    while epoch < n_epochs:
        start_time = time.time()
        epoch = epoch + 1
        if shuffle_batch:
            for minibatch_index in np.random.permutation(
                    range(n_train_batches)):
                cost_epoch = train_model(minibatch_index)
                set_zero(zero_vec)
        else:
            for minibatch_index in xrange(n_train_batches):
                cost_epoch = train_model(minibatch_index)
                set_zero(zero_vec)
        train_losses = [test_model(i) for i in xrange(n_train_batches)]
        train_perf = 1 - np.mean(train_losses)
        val_losses = [val_model(i) for i in xrange(n_val_batches)]
        val_perf = 1 - np.mean(val_losses)
        print(
            'epoch: %i, training time: %.2f secs, train perf: %.2f %%, val perf: %.2f %%'
            % (epoch, time.time() - start_time, train_perf * 100.,
               val_perf * 100.))
        if val_perf >= best_val_perf:
            best_val_perf = val_perf
            test_loss = [test_model_all(i) for i in xrange(n_test_batches)]
            test_perf = 1 - np.sum(test_loss) / float(test_size)
            print("best test performance so far: " + str(test_perf))
    test_loss = [np.asscalar(test_model_all(i)) for i in xrange(n_test_batches)]
    correct_index = np.where(np.array(test_loss) == 0)[0]
    test_labels = np.array(datasets[3])

    # sample two correctly predicted positive documents and two correctly predicted negative documents
    # for each document, generate top five rationales with highest probabilities
    print("negative estimated rationales: ")
    print(len(idx_word_map))
    count_neg = 0
    for c in correct_index:
        if test_labels[c] == 1: continue
        print(util.convert(sorted_sentence_value(c)[0], idx_word_map))
        print(sorted_prob_val(c))
        count_neg += 1
        if count_neg == 2:
            break

    count_pos = 0
    print("positive estimated rationales: ")
    for c in correct_index:
        if test_labels[c] == 0: continue
        print(util.convert(sorted_sentence_value(c)[0], idx_word_map))
        print(sorted_prob_val(c))
        count_pos += 1
        if count_pos == 2:
            break
    return test_perf
Example #41
0
    def __save_win(self, winid, ctype, pids_data, ctime, rollback):

        # print (self,winid,ctype,pids_data,ctime,rollback)

        errors = []
        fname = os.path.join(self.basedir, self.savedir, "win_" + winid)
        if rollback[1]:

            # time=linecache.getline(rollback[0],2).strip()
            # copy scrollback

            shutil.move(rollback[1], os.path.join(self.basedir, self.savedir, "hardcopy." + winid))

        basedata_len = 8
        zombie_vector_pos = 8
        zombie_vector = linecache.getline(fname, zombie_vector_pos)

        f = open(fname, "a")
        # import changes from previous savefiles (run with non-restarted windows still running primer)
        if rollback[0]:
            print ("Rolling back to primer data in windows running primer")
            rollback_dir = rollback[2]
            target = rollback[0]
            fr = open(target, "r")
            last_sep = 1
            for (i, line) in enumerate(fr.readlines()[basedata_len:]):
                f.write(line)
                if line == "-\n":
                    last_sep = i
                elif i - last_sep == 6 and line.startswith("vim_"):

                    # import vim files but also update the window number in a filename

                    for filename in glob.glob(os.path.join(rollback_dir, line.strip() + "_*")):
                        try:
                            tvim = "vim_W%s_%s" % (winid, os.path.basename(filename).split("_", 2)[2])
                            tvim = os.path.join(self.basedir, self.savedir, tvim)
                            shutil.move(filename, tvim)
                        except:
                            errors.append("Unable to rollback vim: %s" % filename)
                elif i - last_sep == 6 and line.startswith("shellvars_"):
                    print ("shellvars")

                    # import shellvars files but also update the window number in the datafile

                    filename = os.path.join(rollback_dir, line.strip())
                    try:
                        tshellvars = "shellvars_W%s_%s" % (winid, os.path.basename(filename).split("_", 2)[2])
                        print(tshellvars)
                        tshellvars = os.path.join(self.basedir, self.savedir, tshellvars)
                        shutil.move(filename, tshellvars)
                        print("%s %s" % (filename, tshellvars))
                    except:
                        errors.append("Unable to rollback shellvars: %s" % filename)
            util.remove(target)
        else:
            pids_data_len = "1"
            if pids_data:
                pids_data_len = str(len(pids_data) + 1)
            f.write(pids_data_len + "\n")
            f.write("-\n")
            f.write("-1\n")
            f.write(zombie_vector)
            f.write("%d\n" % (len(zombie_vector.split("\x00")) - 1))
            f.write(zombie_vector)
            f.write("-1\n")
            f.write("-1\n")
            if pids_data:
                for pid in pids_data:
                    f.write("-\n")
                    for (i, data) in enumerate(pid):
                        if i == 2:
                            if data.endswith("\0\0"):
                                data = data[: len(data) - 1]
                            f.write(str(len(data.split("\x00")) - 1) + "\n")
                            f.write(str(data) + "\n")
                        else:
                            f.write(str(data) + "\n")
            f.write(ctime)
        f.close()
        return errors
Example #42
0
	def no_mouse(self, child):
		if child in self.mouse_interest:
			remove(child, self.mouse_interest)
			if not self.mouse_interest:
				self.parent.no_mouse(self)
Example #43
0
    def post(self, user_id, song_id):
        u_id = int(user_id)
        s_id = int(song_id)
        self.write(util.remove(u_id, s_id))
Example #44
0
def process_segment(segment, output_path, conf):
    """Produce all necessary products for a segment of acquisitions."""
    scene_state = "INWORK"
    # update PROCESSING_STATE in ARD_PROCESSED_SCENES to 'INWORK'
    db.update_scene_state(db.connect(conf.connstr),
                          segment['LANDSAT_PRODUCT_ID'], scene_state)

    logger.info("Scene %s is %s.", segment['LANDSAT_PRODUCT_ID'], scene_state)
    id_parts = landsat.match_dt(segment['LANDSAT_PRODUCT_ID'])

    pathrow = id_parts['wrspath'] + id_parts['wrsrow']
    if pathrow in pathrow2regionLU:
        region = pathrow2regionLU[pathrow]
    else:
        scene_state = "NOGRID"
        db.update_scene_state(db.connect(conf.connstr),
                              segment['LANDSAT_PRODUCT_ID'], scene_state)
        return scene_state

    # Intersect scene with tile grid to find all touching tiles, and
    # consecutive scenes needed to produce them
    hv_tiles, tile_scenes = (geofuncs.get_tile_scene_intersections(
        db.connect(conf.connstr), segment['LANDSAT_PRODUCT_ID'], region))

    logger.info('Number of tiles to create: %d', len(hv_tiles))
    if not hv_tiles:
        logger.error('No scene coordinates found for %s',
                     segment['LANDSAT_PRODUCT_ID'])

        scene_state = "ERROR"
        # update PROCESSING_STATE in ARD_PROCESSED_SCENES
        db.update_scene_state(db.connect(conf.connstr),
                              segment['LANDSAT_PRODUCT_ID'], scene_state)
        return scene_state

    tiling_error_encountered = 0
    for current_tile in hv_tiles:
        try:
            tile_id = landsat.generate_tile_id(segment['LANDSAT_PRODUCT_ID'],
                                               current_tile, region,
                                               conf.collection, conf.version)
            tile_state = process_tile(current_tile, tile_id, segment, region,
                                      tile_scenes, output_path, conf)
            if tile_state == 'ERROR':
                tiling_error_encountered = 1
        except ArdTileNotNeededException:
            logger.warning('Lineage file found 0 contributing scenes,'
                           ' set to NOT NEEDED')
        except ArdTileException:
            logger.exception('Error caught while processing tile %s!',
                             current_tile)
        except ArdSceneException:
            logger.exception('Error caught while processing scene %s!',
                             segment['LANDSAT_PRODUCT_ID'])
            tiling_error_encountered = 1
        except Exception:
            logger.exception('Unexpected error processing tile %s!',
                             current_tile)
            tiling_error_encountered = 1

        # Remove the temporary work directory,
        # but keep adjacent scenes for other tiles
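        # (if generate_tile_id() itself raised, tile_id may be unbound on the
        # first iteration or stale on later ones)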
        if not conf.debug:
            logger.info('    Cleanup: Removing temp directory: %s ...',
                        os.path.join(conf.workdir, tile_id))
            util.remove(os.path.join(conf.workdir, tile_id))

    # If no tiling errors were encountered that may call for a retry,
    # we're done with this scene.  Otherwise, mark the scene for a
    # retry so that failed tiles can be reattempted.
    if not tiling_error_encountered:
        scene_state = "COMPLETE"
    else:
        scene_state = "ERROR"

    # update PROCESSING_STATE in ARD_PROCESSED_SCENES
    db.update_scene_state(db.connect(conf.connstr),
                          segment['LANDSAT_PRODUCT_ID'], scene_state)

    return scene_state
Example #45
0
    def post(self, user_id, song_id):
        u_id = int(user_id)
        s_id = int(song_id)
        self.write(util.remove(u_id, s_id))
Example #46
0
import util

NUM_TESTS = 10


def startBench(coroutines) -> int:
    return int(util.run('benchSwitch', [coroutines])[7])


print('--Start go bench--')
util.build_go('go/benchSwitch.go')

switchesPerSecond = [startBench('100') for i in range(NUM_TESTS)]

mean, std = util.stdev(switchesPerSecond)

print('Result: {} +/- {}'.format(mean, std))

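# remove the benchmark binary (built above by util.build_go)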
util.remove('benchSwitch')
Example #47
0
def clean():
    """Removes all windows libs from libs folder"""
    d = curr_directory() + "/libs/"
    remove(d)
Example #48
0
import util

NUM_TESTS = 10


def startBench(coroutines) -> int:
    return int(util.run('bench0', [coroutines])[2])


print('--Start go bench--')
util.build_go('go/bench0.go')

time = [startBench('100000') for i in range(NUM_TESTS)]

mean, std = util.stdev(time)

print('Result: {} +/- {}'.format(mean, std))

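# remove the benchmark binary (built above by util.build_go)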
util.remove('bench0')
Example #49
0
def clean():
    """Removes all windows libs from libs folder"""
    d = curr_directory() + '/libs/'
    remove(d)
Example #50
0
def run(session, requirecleanup_win, requirecleanup_lay, curwin, curlay,
        height, select_other=False):
    global lock_and_com_file, mru_file
    lltmpdir = os.path.join(tmpdir, '___layoutlist')
    try:
        os.makedirs(lltmpdir)
    except:
        pass

    signal.signal(signal.SIGINT, handler)
    session = session.split('.', 1)[0]

    ret = 0
    ss = ScreenSaver(session)
    wnum = os.getenv('WINDOW')
    if requirecleanup_lay:
        lnum = ss.get_layout_number()[0]
    else:
        lnum = None

    mru_file = os.path.join(lltmpdir, '%s_MRU' % session)
    if select_other:
        mru_layouts = pickle.load(open(mru_file, 'r'))
        num, title = mru_layouts[1]
        # swap the two most recently used layouts, then activate the new head
        mru_layouts[0], mru_layouts[1] = mru_layouts[1], mru_layouts[0]
        ss.command_at(False, 'eval "layout select %s" "layout title"' %
                      num)
        pickle.dump(mru_layouts, open(mru_file, 'w'))
        return ret
    if NO_END:
        lock_and_com_file = os.path.join(lltmpdir, session)
        f = open(lock_and_com_file, 'w')
        f.write(str(os.getpid()) + '\n')
        if requirecleanup_win and not requirecleanup_lay:
            f.write(wnum + '\n')
        else:
            f.write('-1\n')
        if requirecleanup_lay:
            f.write(lnum + '\n')
        else:
            f.write('-1\n')
        f.close()

    try:
        try:
            layinfo = list(sc.gen_layout_info(ss, sc.dumpscreen_layout_info(ss)))
        finally:
            sc.cleanup()
    except:
        sys.stderr.write('Layouts dumping error.\n')
        return 1
    screen = curses.initscr()
    (laytable, pos_start) = create_table(ss, screen, curlay, layinfo,
            lnum, height)
    try:
        curses.start_color()
    except:
        curses.endwin()
        sys.stderr.write('start_color() failed!\n')
        return 1

    curses.noecho()

    #screen.notimeout(1)

    try:
        choice = menu_table(
            ss,
            screen,
            lnum,
            curwin,
            curlay,
            layinfo,
            laytable,
            pos_start[0],
            pos_start[1],
            height,
            )
        if requirecleanup_lay and choice == lnum:
            choice = curlay
    except Exception:
        import traceback
        traceback.print_exc(file=sys.stderr)
        choice = curlay
        ret = 1
    curses.endwin()
    if NO_END:
        from util import remove
        remove(lock_and_com_file)
    if requirecleanup_lay:
        ss.command_at(False,
                      'eval "layout select %s" "layout remove %s" "at \"%s\#\" kill" "layout title"' %
                      (choice, lnum, wnum))
    elif requirecleanup_win:
        ss.command_at(False,
                      'eval "select %s" "layout select %s" "at \"%s\#\" kill" "layout title"' %
                      (curwin, choice, wnum))
    else:
        ss.command_at(False, 'eval "layout select %s" "layout title"' %
                      choice)
    return ret
Example #51
0
    def __save_screen(self):
        errors=[]
        homewindow=self.homewindow
        group_wins={}
        group_groups={}
        excluded_wins=[]
        excluded_groups=[]
        scroll_wins=[]
        scroll_groups=[]
        cwin=-1
        ctty=None
        cppids={}
        rollback=None,None,None
        ctime=self.time()
        findir=os.path.join(self.basedir,self.savedir)
        #sc_cwd=self.command_at(True,'hardcopydir')
        #print(sc_cwd)
        self.command_at(False, 'at \# dumpscreen window %s'%os.path.join(self.basedir,self.savedir,"winlist"))
        self.command_at(False, 'at \# dumpscreen window %s -F'%os.path.join(self.basedir,self.savedir))
        self.command_at(False, 'hardcopydir %s'%os.path.join(self.basedir,self.savedir))
        self.command_at(False, 'at \# hardcopy -h')
        self.command_at(False, 'hardcopydir \"%s\"'%self.homedir) # should be modified to properly restore hardcopydir(:dumpscreen settings)
        try:
            f=open(os.path.join(findir,"winlist"),'r')
            f.close()
        except:
            self.command_at(False, 'at \# dumpscreen window %s'%os.path.join(self.basedir,self.savedir,"winlist"))
        fmru = open(os.path.join(findir,"mru"),"w") 
        for line in open(os.path.join(findir,"winlist"),'r'):
            try:
                id,cgroupid,ctty,ctitle = line.strip().split(' ',3)
            except:
                id,cgroupid,ctty= line.strip().split(' ')
                ctitle=None
            cwin=id
            fmru.write("%s "%cwin)
            
            if(ctty[0]=='z'): # zombie
                continue
            if(ctty[0]=="g"): # group
                ctype="group"
                cpids = None
                cpids_data=None
                if self.excluded:
                    if cwin in self.excluded or ctitle in self.excluded:
                        excluded_groups.append(cwin)
                    try:
                        group_groups[cgroupid]+=[cwin]
                    except:
                        group_groups[cgroupid]=[cwin]
                if self.scroll:
                    if cwin in self.scroll or ctitle in self.scroll:
                        scroll_groups.append(cwin)
                    try:
                        group_groups[cgroupid]+=[cwin]
                    except:
                        group_groups[cgroupid]=[cwin]
            else:
                if self.excluded:
                    if cwin in self.excluded or ctitle in self.excluded:
                        excluded_wins.append(cwin)
                    else:
                        try:
                            group_wins[cgroupid]+=[cwin]
                        except:
                            group_wins[cgroupid]=[cwin]
                if self.scroll:
                    if cwin in self.scroll or ctitle in self.scroll:
                        scroll_wins.append(cwin)
                    else:
                        try:
                            group_wins[cgroupid]+=[cwin]
                        except:
                            group_wins[cgroupid]=[cwin]
                if(ctty[0]=="t"): # telnet
                    ctype="telnet"
                    cpids = None
                    cpids_data=None
                else:
                    ctype="basic"
                    # get sorted pids in window
                    cpids=sc.get_tty_pids(ctty)
                    cpids_data=[]
                    ncpids=[]
                    for pid in cpids:
                        try:
                            pidinfo=sc.get_pid_info(pid)
                            (exehead,exetail)=os.path.split(pidinfo[1])
                            if exetail in self.blacklist:
                                blacklist=True
                            else:
                                blacklist=False
                            cpids_data.append(pidinfo+tuple([blacklist]))
                            ncpids.append(pid)
                        except:
                            errors.append('%s PID %s: Unable to access. No permission or no procfs.'%(cwin,pid))
                    cpids=ncpids
            
            if(cpids):
                for i,pid in enumerate(cpids):
                    if(cpids_data[i][3]):
                        text="BLACKLISTED"
                    else: 
                        text=""
                    l=cpids_data[i][2].split('\0')
                    jremove=[]
                    wprev=False
                    for j,w in enumerate(l):
                        if w == '-ic' or w == '-c':
                            wprev=True
                        elif wprev:
                            if w.startswith(self.primer):
                                jremove+=j,j-1
                            wprev=False
                    if jremove:
                        s=[]
                        for j,w in enumerate(l):
                            if j not in jremove:
                                s.append(w)
                        newdata=(cpids_data[i][0],cpids_data[i][1],"\0".join(["%s"%v for v in s]),cpids_data[i][3])
                        cpids_data[i]=newdata

                    #out('%s    pid = %s:     cwd = %s;  exe = %s;  cmdline = %s' % (text,pid, cpids_data[i][0], cpids_data[i][1], cpids_data[i][2]))
                    vim_name=str(None)
                    args=cpids_data[i][2].split('\0')
                    if args[0].endswith(self.primer_base) and args[1]=='-p':
                        sys.stdout.write('(primer)')
                        rollback=self.__rollback(cpids_data[i][2])
                        #out(str(rollback))
                    elif args[0] in self.vim_names and self.bVim:
                        sys.stdout.write('(vim)')
                        vim_name=self.__save_vim(id)
                        nargs=[]
                        rmarg=False
                        for arg in args:
                            if rmarg:
                                rmarg=False
                                pass
                            elif arg in ('-S','-i'):
                                rmarg=True
                            else:
                                nargs.append(arg)
                        args=nargs
                        newdata=(cpids_data[i][0],cpids_data[i][1],"\0".join(["%s"%v for v in args]),cpids_data[i][3])
                        cpids_data[i]=newdata
                    
                    cpids_data[i]=(cpids_data[i][0],cpids_data[i][1],cpids_data[i][2],cpids_data[i][3],vim_name)
            scrollback_filename=os.path.join(self.basedir,self.savedir,"hardcopy."+id)
            sys.stdout.write("%s %s | "%(cwin,ctype))
            errors+=self.__save_win(id,ctype,cpids_data,ctime,rollback)
            rollback=None,None,None
        out('')
        fmru.close()
        util.remove(os.path.join(findir,"winlist"))
        # remove ignored scrollbacks
        if 'all' in self.scroll:
            for f in glob.glob(os.path.join(self.basedir, self.savedir, "hardcopy.*")):
                open(f,'w')
        elif self.scroll:
            scroll_groups_tmp=[]
            while scroll_groups:
                sgroup=scroll_groups.pop()
                if sgroup not in scroll_groups_tmp:
                    scroll_groups_tmp.append(sgroup)
                try:
                    ngroups = group_groups[sgroup]
                    if ngroups:
                        for g in ngroups:
                            scroll_groups.append(g)
                except:
                    pass
            scroll_groups = scroll_groups_tmp
            out('Scrollback excluded groups: %s'%str(scroll_groups))
            for sgroup in scroll_groups:
                scroll_wins.append(sgroup)
                try:
                    for w in group_wins[sgroup]:
                        scroll_wins.append(w)
                except:
                    pass
            out('All scrollback excluded windows: %s'%str(scroll_wins))
            for w in scroll_wins:
                util.remove(os.path.join(self.basedir, self.savedir, "hardcopy.%s"%w))
        # remove ignored windows
        if self.excluded:
            excluded_groups_tmp=[]
            while excluded_groups:
                egroup=excluded_groups.pop()
                if egroup not in excluded_groups_tmp:
                    excluded_groups_tmp.append(egroup)
                try:
                    ngroups = group_groups[egroup]
                    if ngroups:
                        for g in ngroups:
                            excluded_groups.append(g)
                except:
                    pass
            excluded_groups = excluded_groups_tmp
            out('Excluded groups: %s'%str(excluded_groups))
            for egroup in excluded_groups:
                excluded_wins.append(egroup)
                try:
                    for w in group_wins[egroup]:
                        excluded_wins.append(w)
                except:
                    pass
            out('All excluded windows: %s'%str(excluded_wins))
            bpath1 = os.path.join(self.basedir, self.savedir, "win_")
            bpath2 = os.path.join(self.basedir, self.savedir, "hardcopy.")
            bpath3 = os.path.join(self.basedir, self.savedir, "vim_W")
            for win in excluded_wins:
                util.remove(bpath1+win)
                util.remove(bpath2+win)
                for f in glob.glob(bpath3+win+'_*'):
                    util.remove(f)

        linkify(os.path.join(self.basedir,self.savedir),"win_"+homewindow,"last_win")
        if errors:
            out('Errors:')
            for error in errors:
                out(error)
        out('\nSaved: '+str(ctime))
Example #52
0
def process_browse(bands, workdir, tile_id, outpath):
    """Create a pyramid-layered RBG browse file for EE."""
    logger.info('     Start processing for BROWSE')

    output_browse_filename = os.path.join(outpath, tile_id + '.tif')
    if os.path.exists(output_browse_filename):
        logger.warning("Skip previously generated result %s",
                       output_browse_filename)
        return output_browse_filename

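    # map each browse band name (red/green/blue) to its source GeoTIFF path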
    bands = {
        k: util.ffind(workdir, tile_id, tile_id + '_' + v + '.tif')
        for k, v in bands.items()
    }

    # create RGB image
    temp_filename1 = os.path.join(workdir, tile_id + '_brw1.tif')
    merge_cmd = 'gdal_merge.py -o {outfile} -separate {red} {green} {blue}'
    results = util.execute_cmd(
        merge_cmd.format(outfile=temp_filename1, **bands))
    if results['status'] != 0:
        return results['status']

    # scale the pixel values
    temp_filename2 = os.path.join(workdir, tile_id + '_brw2.tif')
    scale_cmd = 'gdal_translate -scale 0 10000 -ot Byte {} {}'
    results = util.execute_cmd(scale_cmd.format(temp_filename1,
                                                temp_filename2))
    if results['status'] != 0:
        return results['status']

    # apply compression
    browse_filename = os.path.join(workdir, tile_id + '.tif')
    comp_cmd = 'gdal_translate -co COMPRESS=JPEG -co PHOTOMETRIC=YCBCR {} {}'
    results = util.execute_cmd(comp_cmd.format(temp_filename2,
                                               browse_filename))
    if results['status'] != 0:
        # The browse generation failed on the HSM.
        # Wait a short period and try again.
        logger.warning('gdal_translate failed to create the browse.  '
                       'Trying again.')
        time.sleep(10)
        results = util.execute_cmd(
            comp_cmd.format(temp_filename2, browse_filename))
        if results['status'] != 0:
            return results['status']

    # internal pyramids
    addo_cmd = 'gdaladdo {} 2 4 8 16'
    results = util.execute_cmd(addo_cmd.format(browse_filename))
    if results['status'] != 0:
        # The pyramid generation failed on the HSM.
        # Wait a short period and try again.
        logger.warning('gdaladdo failed to create the pyramids.  '
                       'Trying again.')
        time.sleep(10)
        results = util.execute_cmd(addo_cmd.format(browse_filename))
        if results['status'] != 0:
            return results['status']

    # Copy the browse to the output location, and verify using checksums.
    shutil.copyfile(browse_filename, output_browse_filename)
    if (util.checksum_md5(browse_filename) !=
            util.checksum_md5(output_browse_filename)):
        logger.warning('%s checksums do not match.',
                       os.path.basename(browse_filename))
        os.remove(output_browse_filename)
        return 1
    else:
        logger.info('%s checksums match.', os.path.basename(browse_filename))

    util.remove(temp_filename1, temp_filename2, browse_filename + '.aux.xml',
                browse_filename)

    logger.info('    End building browse.')
    return 0
Example #53
0
    def __save_win(self, winid, ctype, pids_data, ctime, rollback):

        # print (self,winid,ctype,pids_data,ctime,rollback)

        errors = []
        fname = os.path.join(self.basedir, self.savedir, "win_" + winid)
        if rollback[1]:

            #time=linecache.getline(rollback[0],2).strip()
            #copy scrollback

            shutil.move(rollback[1], os.path.join(self.basedir, self.savedir,
                        "hardcopy." + winid))

        basedata_len = 8
        zombie_vector_pos = 8
        zombie_vector = linecache.getline(fname, zombie_vector_pos)

        f = open(fname, "a")
        # import changes from previous savefiles (run with non-restarted windows still running primer)
        if rollback[0]:
            print('Rolling back to primer data in windows running primer')
            rollback_dir = rollback[2]
            target = rollback[0]
            fr = open(target, 'r')
            last_sep = 1
            for (i, line) in enumerate(fr.readlines()[basedata_len:]):
                f.write(line)
                if line == "-\n":
                    last_sep = i
                elif i - last_sep == 6 and line.startswith('vim_'):

                    #import vim files but also update the window number in a filename

                    for filename in glob.glob(os.path.join(rollback_dir,
                            line.strip() + '_*')):
                        try:
                            tvim = "vim_W%s_%s" % (winid, os.path.basename(filename).split("_",
                                    2)[2])
                            tvim = os.path.join(self.basedir, self.savedir,
                                    tvim)
                            shutil.move(filename, tvim)
                        except:
                            errors.append('Unable to rollback vim: %s' %
                                    filename)
                elif i - last_sep == 6 and line.startswith('shellvars_'):
                    print('shellvars')

                    #import shellvars files but also update the window number in the datafile

                    filename = os.path.join(rollback_dir, line.strip())
                    try:
                        tshellvars = "shellvars_W%s_%s" % (winid, os.path.basename(filename).split("_",2)[2])
                        print(tshellvars)
                        tshellvars = os.path.join(self.basedir, self.savedir,
                                tshellvars)
                        shutil.move(filename, tshellvars)
                        print("%s %s" % (filename, tshellvars))
                    except:
                        errors.append('Unable to rollback shellvars: %s' %
                                filename)
            util.remove(target)
        else:
            pids_data_len = "1"
            if pids_data:
                pids_data_len = str(len(pids_data) + 1)
            f.write(pids_data_len + '\n')
            f.write("-\n")
            f.write("-1\n")
            f.write(zombie_vector)
            f.write("%d\n" % (len(zombie_vector.split('\x00')) - 1))
            f.write(zombie_vector)
            f.write("-1\n")
            f.write("-1\n")
            if pids_data:
                for pid in pids_data:
                    f.write("-\n")
                    for (i, data) in enumerate(pid):
                        if i == 2:
                            if data.endswith('\0\0'):
                                data = data[:len(data) - 1]
                            f.write(str(len(data.split('\x00')) - 1) +
                                    '\n')
                            f.write(str(data) + '\n')
                        else:
                            f.write(str(data) + '\n')
            f.write(ctime)
        f.close()
        return errors
Example #54
0
def flatten(computations):
    # Expand one level of nesting: replace the first CompositeComputation
    # found with its constituent computations. (`remove(pred, seq)` is
    # assumed to drop the items matching pred, toolz-style.)
    for c in computations:
        if isinstance(c, CompositeComputation):
            return tuple(remove(c.__eq__, computations)) + tuple(c.computations)
    return computations
Example #55
0
            rollback = (None, None, None)
        out("")

        # remove ignored scrollbacks

        if 'all' in self.scroll:
            for f in glob.glob(os.path.join(findir, "hardcopy.*")):
                open(f, "w")
        elif self.scroll:
            import tools
            (scroll_groups, scroll_wins) = tools.subwindows(self.pid, sc.datadir,
                    self.scroll)
            out('Scrollback excluded groups: %s' % str(scroll_groups))
            out('All scrollback excluded windows: %s' % str(scroll_wins))
            for w in scroll_wins:
                util.remove(os.path.join(findir, "hardcopy.%s" % w))

        # remove ignored windows

        if self.excluded:
            import tools
            (excluded_groups, excluded_wins) = tools.subwindows(self.pid,
                    sc.datadir, self.excluded)
            out('Excluded groups: %s' % str(excluded_groups))
            out('All excluded windows: %s' % str(excluded_wins))
            bpath1 = os.path.join(findir, "win_")
            bpath2 = os.path.join(findir, "hardcopy.")
            bpath3 = os.path.join(findir, "vim_W")
            bpath4 = os.path.join(findir, "shellvars_W")
            for win in excluded_wins:
                util.remove(bpath1 + win)
Example #56
0
	def no_timer(self, child):
		if child in self.timer_interest:
			remove(child, self.timer_interest)
			if not self.timer_interest:
				self.parent.no_timer(self)
Example #57
0
            rollback = (None, None, None)
        out("")

        # remove ignored scrollbacks

        if "all" in self.scroll:
            for f in glob.glob(os.path.join(findir, "hardcopy.*")):
                open(f, "w")
        elif self.scroll:
            import tools

            (scroll_groups, scroll_wins) = tools.subwindows(self.pid, sc.datadir, self.scroll)
            out("Scrollback excluded groups: %s" % str(scroll_groups))
            out("All scrollback excluded windows: %s" % str(scroll_wins))
            for w in scroll_wins:
                util.remove(os.path.join(findir, "hardcopy.%s" % w))

        # remove ignored windows

        if self.excluded:
            import tools

            (excluded_groups, excluded_wins) = tools.subwindows(self.pid, sc.datadir, self.excluded)
            out("Excluded groups: %s" % str(excluded_groups))
            out("All excluded windows: %s" % str(excluded_wins))
            bpath1 = os.path.join(findir, "win_")
            bpath2 = os.path.join(findir, "hardcopy.")
            bpath3 = os.path.join(findir, "vim_W")
            bpath4 = os.path.join(findir, "shellvars_W")
            for win in excluded_wins:
                util.remove(bpath1 + win)