Example #1
def readMetadata_SemanticDB(metadata_file):
    print '\n' + '*' * 40
    print 'PROCEEDING TO READ METADATA (SEMANTIC DB VIRTUOSO):'
    print '*' * 40
    workingDir = os.getcwd()
    print 'Working Dir: ', workingDir
    print 'Full Path Metadata File: ' + workingDir + '\\' + metadata_file
    xmldoc = minidom.parse(metadata_file)

    semanticDB_dict = {}

    # read the Virtuoso connection settings from the <SemanticDB_Virtuoso> element
    for semanticDB_branch in xmldoc.getElementsByTagName(
            'SemanticDB_Virtuoso'):
        semanticDB_http_location = semanticDB_branch.getElementsByTagName(
            'http_location')[0].firstChild.nodeValue
        semanticDB_port = semanticDB_branch.getElementsByTagName(
            'port')[0].firstChild.nodeValue
        semanticDB_folder_endPoint = semanticDB_branch.getElementsByTagName(
            'folder_end_point')[0].firstChild.nodeValue

    # remove whitespace from the XML values
    semanticDB_http_location = re.sub(r'\s+', '', semanticDB_http_location)
    semanticDB_port = re.sub(r'\s+', '', semanticDB_port)
    semanticDB_folder_endPoint = re.sub(r'\s+', '', semanticDB_folder_endPoint)

    semanticDB_dict['http_location'] = semanticDB_http_location
    semanticDB_dict['port'] = semanticDB_port
    semanticDB_dict['folder_end_point'] = semanticDB_folder_endPoint

    print semanticDB_dict
    return semanticDB_dict
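
For orientation, the tag names read above imply a metadata layout roughly like the sketch below; the values are invented placeholders, not the project's real configuration:

# <SemanticDB_Virtuoso>
#     <http_location> http://localhost </http_location>
#     <port> 8890 </port>
#     <folder_end_point> sparql </folder_end_point>
# </SemanticDB_Virtuoso>
# (whitespace inside the values is harmless, since the function strips it with re.sub)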
Example #2
def _git_sync(host, remote=None, dry=False):
    cwd = os.getcwd()
    relpwd = relpath(cwd, expanduser('~'))

    parts = [
        'git commit -am "wip"',
    ]

    if remote:
        parts += [
            'git push {remote}',
            'ssh {host} "cd {relpwd} && git pull {remote}"'
        ]
    else:
        parts += ['git push', 'ssh {host} "cd {relpwd} && git pull"']

    kw = dict(host=host, relpwd=relpwd, remote=remote)

    for part in parts:
        command = part.format(**kw)
        if not dry:
            result = ub.cmd(command, verbose=2)
            retcode = result['ret']
            if command.startswith('git commit') and retcode == 1:
                # git commit returns 1 when there is nothing to commit; keep syncing
                pass
            elif retcode != 0:
                print('git-sync cannot continue. retcode={}'.format(retcode))
                break
        else:
            print(command)
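
With dry=True the helper only prints the formatted commands instead of running them, which is an easy way to preview a sync; the host and remote names below are hypothetical:

# _git_sync('devbox', remote='origin', dry=True)
# would print something like:
#   git commit -am "wip"
#   git push origin
#   ssh devbox "cd <cwd relative to ~> && git pull origin"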
Example #4
    def loadOtherProperties(cls):
        '''
           Include-properties functionality implementation.
           WARNING: adding grinder.thread, grinder.runs, grinder.process, grinder.jvm will not work in include files;
                   more generally, keys beginning with the "grinder." prefix should be avoided because these special keys are bootstrapped in Java.
            TODO: make it recursive (low priority)
        :param cls: current class
        '''

        currentPath = os.getcwd()
        logger.trace('Initial properties: "%s"' % (properties))

        # keys kept from the original property file (those not beginning with the include keyword)
        otherKeys = GrinderProperties()

        # separating include from other keys
        includeKeys = []
        noIncludeSoFar = True
        for key in properties.keys():
            if key.lower().startswith('include'):
                includeKeys.append(key)
                noIncludeSoFar = False
                continue
            otherKeys.setProperty(key, properties.getProperty(key))

        if noIncludeSoFar:
            logger.trace(
                'No "include" keywords in the property file, returning properties unchanged'
            )
            return properties

        # final properties
        newProps = GrinderProperties()

        # include, include2, include3 ... must be processed in sorted order
        for key in sorted(includeKeys):
            # TODO: manage case where value is a PATH
            filepath = '%s%s%s' % (currentPath, os.sep,
                                   properties.getProperty(key).strip())
            if not os.path.exists(filepath):
                logger.error(
                    'loadOtherProperties() - include file "%s" does not exist'
                    % (filepath))
                raise SyntaxError(
                    'loadOtherProperties() - include file "%s" does not exist'
                    % (filepath))
            try:
                logger.trace('loadOtherProperties() - Loading %s' % (filepath))
                newProps.load(FileInputStream(filepath))
            except Exception, e:
                logger.error(
                    'loadOtherProperties(): exception raised, reason: %s' %
                    (e))
                raise e
Example #5
def readMetadata_sources(metadata_file):
    print '\n' + '*' * 40
    print 'PROCEEDING TO READ METADATA (SOURCES):'
    print '*' * 40
    workingDir = os.getcwd()
    print 'Working Dir: ', workingDir
    print 'Full Path Metadata File: ' + workingDir + '\\' + metadata_file
    xmldoc = minidom.parse(metadata_file)
    data_sources = xmldoc.getElementsByTagName('source_name')

    sources_all = []
    for s in data_sources:
        source_dict = {}
        source_name = s.attributes['name'].value
        source_location = s.getElementsByTagName(
            'location')[0].firstChild.nodeValue
        source_query_type = s.getElementsByTagName(
            'query_type')[0].firstChild.nodeValue

        # remove whitespace from the XML values
        source_name = re.sub(r'\s+', '', source_name)
        source_location = re.sub(r'\s+', '', source_location)
        source_query_type = re.sub(r'\s+', '', source_query_type)

        source_dict['source_name'] = source_name
        source_dict['location'] = source_location
        source_dict['query_type'] = source_query_type

        # flat list alternating schema field name, source field name
        attributes = []
        attribute_list = s.getElementsByTagName('attr_property')
        for attr in attribute_list:
            field_in_schema = attr.attributes['name'].value
            fields_in_source = attr.childNodes[0].nodeValue
            field_in_schema = re.sub(r'\s+', '', field_in_schema)
            fields_in_source = re.sub(r'\s+', '', fields_in_source)
            attributes.append(field_in_schema)
            attributes.append(fields_in_source)
            source_dict['attributes'] = attributes
        sources_all.append(source_dict)

    print sources_all
    return sources_all
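
The parsing above suggests each source entry in the metadata file looks roughly like the following sketch; the attribute and element values are invented for illustration:

# <source_name name="MOVIELENS">
#     <location> data/user_ratings.dat </location>
#     <query_type> file </query_type>
#     <attr_property name="schema_field_name"> source_field_name </attr_property>
# </source_name>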
Example #6
def readMetadata_mappings(metadata_file):
    print '\n' + '*' * 40
    print 'PROCEEDING TO READ METADATA (MAPPINGS):'
    print '*' * 40
    workingDir = os.getcwd()
    xmldoc = minidom.parse(metadata_file)
    field_mappings = xmldoc.getElementsByTagName('uri_property')

    global_concept_mappings_dict = {}
    for single_mapping in field_mappings:
        global_concept_name = single_mapping.attributes['name'].value
        global_concept_uri = single_mapping.childNodes[0].nodeValue
        global_concept_name = re.sub(r'\s+', '', global_concept_name)
        global_concept_uri = re.sub(r'\s+', '', global_concept_uri)
        global_concept_mappings_dict[global_concept_name] = global_concept_uri
    #print global_concept_mappings_dict
    #print json.dumps(global_concept_mappings_dict, indent=2)

    print json.dumps(global_concept_mappings_dict, indent=2)
    print 'Mappings done'
    return global_concept_mappings_dict
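
Similarly, this reader expects <uri_property> elements whose name attribute is a global concept and whose text content is its URI; a sketch using a key that the MovieLens loader below actually looks up (the URI value is illustrative):

# <uri_property name="USER_RATING_MOVIE_Relation_RatingNumber"> https://schema.org/Rating </uri_property>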
Example #9
    def setUp(self):
        # chdir to the test dir so that relative resource files are resolved
        # even when the test is run from another directory
        self.previous_workdir = os.getcwd()
        os.chdir(os.path.dirname(__file__))
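
Since setUp saves the previous working directory, the matching tearDown presumably restores it so later tests are unaffected; a minimal sketch, assuming a unittest.TestCase subclass:

    def tearDown(self):
        # restore the directory saved in setUp
        os.chdir(self.previous_workdir)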
Example #10
import os

# Open a file for binary writing (line buffering only applies to text mode, so no buffering argument).
file = open(file="text.txt", mode='wb')
print("Name of the title: ", file.name)
print("Closed or not: ", file.closed)
print("Opening mode: ", file.mode)
file.write(
    bytes("I am not a psychopath, I am a high functioning sociopath.\n",
          "UTF-8"))
file.close()

file = open(file="text.txt", mode="r", buffering=1)
print("Position: ",
      file.tell())  # Tell the position of the current seeking point.
data = file.read()  # Read the whole file
file.close()
print("The read data is: ", data)
print("Closed or not: ", file.closed)

# Use the os module for filesystem operations.
os.rename("text.txt", "newText.txt")  # Rename the file.
os.remove("newText.txt")  # Remove the file with this file name.
os.mkdir("folder", mode=0o777)
os.chdir("folder")
# Changes the current directory with this.
os.mkdir("test")  # Makes a folder named "test" inside "folder".

print("The current dir: ", os.getcwd())  # Prints the current directory path.
os.rmdir("test")    # rmdir() only removes empty directories, so remove the inner one first.
os.chdir("..")      # Step back out of "folder" ...
os.rmdir("folder")  # ... and remove it now that it is empty.
from os.path import expanduser
import time, subprocess, wget, os
print('SCRIPT BY ANDREI ZHUK')
time.sleep(1)
print('https://vk.com/andrey47785')
# Don't worry about the modules not being importable: PyInstaller will bundle the modules into the program for you.
home = expanduser("~")
time.sleep(1.56)
print('Downloading files...')
time.sleep(1)
print('.\\WAIT//.')
s = 'https://www.python.org/ftp/python/3.8.7/python-3.8.7rc1-amd64.exe'
filename = wget.download(s)
os.rename(filename, os.getcwd() + '/' + filename)
subprocess.call([home + '/Desktop/python-3.9.1-amd64.exe'])
print('OK')
os.remove(home + '/Desktop/python-3.9.1-amd64.exe')
print('INSTALLER DELETED')
time.sleep(3)
print('Deleting myself...')
os.remove(home + '/Desktop/installpythonby.exe')
Example #13
def bootstrapLibrairies():
    if grinder:
        properties = CoreGrinder.getProperties()
        logger = grinder.logger

        # Copy all project files before loading Jar dependencies
        addProjectFiles(properties, logger)

        # A helper class to add libraries dynamically on the classloader
        cl = classPathLoader()

        ########## BOOTSTRAP CORE LIBRARIES #######
        currdir = properties.getProperty(
            'grindertool.core.lib') or '%s%s%s' % (normpath(
                os.getcwd()), os.sep, 'libs%score' % (os.sep))
        if exists(currdir):
            logger.info(
                '[agent=%d][processNumber=%d] Loading core libraries %s' %
                (grinder.agentNumber, grinder.processNumber, currdir))
            cl.addDirectory(currdir)
        else:
            logger.error(
                'Configuration error: Core libs directory not found in location %s'
                % (currdir))
            raise SyntaxError(
                'Configuration error: Core libs directory not found in location %s'
                % (currdir))

        #### OTHER LIBRARIES present under libs ####
        currdir = '%s%s%s' % (normpath(os.getcwd()), os.sep, 'libs')
        if exists(currdir):
            for a_dir in os.listdir(currdir):
                if a_dir != 'core':  # ('core') is just the string 'core', not a tuple
                    logger.info(
                        '[agent=%d][processNumber=%d] Loading libraries under %s'
                        % (grinder.agentNumber, grinder.processNumber, a_dir))
                    cl.addDirectory('%s%s%s' % (currdir, os.sep, a_dir))

        ########## BOOTSTRAP SMPP ###############
        #
        # Must be loaded only if smpp is started
        if properties.getBoolean('grindertool.smsc.start', False):
            smpp_dir = properties.getProperty('grindertool.smsc.lib') or None
            if not smpp_dir:
                logger.error(
                    'Please set required parameter: grindertool.smsc.lib')
                raise SyntaxError(
                    'Please set required parameter: grindertool.smsc.lib')
            if not os.path.exists(smpp_dir):
                logger.error(
                    'Smpp libraries directory (grindertool.smsc.lib) %s does not exist!'
                    % (smpp_dir))
                raise SyntaxError(
                    'Smpp libraries directory (grindertool.smsc.lib) %s does not exist!'
                    % (smpp_dir))
            logger.info(
                '[agent=%d][processNumber=%d] Loading SMSC libraries %s ...' %
                (grinder.agentNumber, grinder.processNumber, smpp_dir))
            print '%s [agent=%d][processNumber=%d] Loading SMSC libraries %s ...' % (
                str(datetime.datetime.now()), grinder.agentNumber,
                grinder.processNumber, smpp_dir)
            cl = classPathLoader()
            cl.addDirectory(smpp_dir)
    # else:
    #     logger.info('No SMSC libraries loaded')
    ######################################

        if grinder.agentNumber == -1:
            print 'You are in STANDALONE mode (no console at all)'
            if properties.getBoolean('grindertool.smsc.start', False):
                print '\tSMSC logs location: %s%slog' % (''.join(
                    smpp_dir.split(os.sep)[:-2]
                    or smpp_dir.split('/')[:-2]), os.sep)
            print '\tGrindertool logs: %s' % (
                properties.getProperty('grinder.logDirectory'))
Example #14
def _dirstats(dpath=None):  # nocover
    """
    Testing helper for printing directory information
    (mostly for investigating windows weirdness)
    """
    from ubelt import util_colors
    if dpath is None:
        dpath = os.getcwd()
    print('===============')
    print('Listing for dpath={}'.format(dpath))
    print('E L F D J - path')
    print('--------------')
    if not os.path.exists(dpath):
        print('... does not exist')
        return
    paths = sorted(os.listdir(dpath))
    for path in paths:
        full_path = join(dpath, path)
        E = os.path.exists(full_path)
        L = os.path.islink(full_path)
        F = os.path.isfile(full_path)
        D = os.path.isdir(full_path)
        J = util_platform.WIN32 and _win32_links._win32_is_junction(full_path)
        ELFDJ = [E, L, F, D, J]
        if ELFDJ == [1, 0, 0, 1, 0]:
            # A directory
            path = util_colors.color_text(path, 'green')
        elif ELFDJ == [1, 0, 1, 0, 0]:
            # A file (or a hard link; they are indistinguishable with one query)
            path = util_colors.color_text(path, 'white')
        elif ELFDJ == [1, 0, 0, 1, 1]:
            # A directory junction
            path = util_colors.color_text(path, 'yellow')
        elif ELFDJ == [1, 1, 1, 0, 0]:
            # A file link
            path = util_colors.color_text(path, 'brightgreen')
        elif ELFDJ == [1, 1, 0, 1, 0]:
            # A directory link
            path = util_colors.color_text(path, 'brightcyan')
        elif ELFDJ == [0, 1, 0, 0, 0]:
            # A broken file link
            path = util_colors.color_text(path, 'red')
        elif ELFDJ == [0, 1, 0, 1, 0]:
            # A broken directory link
            path = util_colors.color_text(path, 'darkred')
        elif ELFDJ == [0, 0, 0, 1, 1]:
            # A broken directory junction
            path = util_colors.color_text(path, 'purple')
        elif ELFDJ == [1, 0, 1, 0, 1]:
            # A file junction? That's not good.
            # I guess this is a windows 7 thing?
            path = util_colors.color_text(path, 'red')
        elif ELFDJ == [1, 1, 0, 0, 0]:
            # Windows? Why? What does this mean!?
            # A directory link that can't be resolved?
            path = util_colors.color_text(path, 'red')
        else:
            print('dpath = {!r}'.format(dpath))
            print('path = {!r}'.format(path))
            raise AssertionError(str(ELFDJ) + str(path))
        line = '{E:d} {L:d} {F:d} {D:d} {J:d} - {path}'.format(**locals())
        if os.path.islink(full_path):
            line += ' -> ' + os.readlink(full_path)
        elif _win32_links is not None:
            if _win32_links._win32_is_junction(full_path):
                line += ' => ' + _win32_links._win32_read_junction(full_path)
        print(line)
Example #15
def MOVIELENS_SOURCE_Load_from_Disk_TO_Memory_in_triples_format(
        in_source, in_metadata_mappings, in_metadata_content):
    print 'FIRING MOVIELENS'
    # hardcoded names of the keys to look up in the metadata, because MovieLens delivers a data file, not JSON or similar
    Reification_RatingMovie_relation = 'USER_RATING_MOVIE_Relation_RatingMovie'
    Reification_RatingUser_relation = 'USER_RATING_MOVIE_Relation_RatingUser'
    Reification_RatingNumber_relation = 'USER_RATING_MOVIE_Relation_RatingNumber'

    workingDir = os.getcwd()

    print in_source['location']
    # get information from the source (WEB CONNECTION)
    movielens_conn = movielens_connector(in_source['location'])

    print '\nMOVIELENS RESPONSE: ..... '
    movieLens_prefs_UserRatings = {}
    movieLens_prefs_UserRatings = movielens_conn.load_parse_MovieLens2015_UserRatings(
        workingDir + '/' + in_source['location'])
    # returns a dictionary like this:
    #'Jaime': {'Predator 1': 5, 'Predator 2':4.5},
    #'Jesulin Ubrique': {'Toros 1': 1, 'Toros 2':2, 'Toros 3': 3, 'Toros 4': 4},

    # get mapping (source_field_name -> global_schema_field_name)
    print '\nMAPPING WITH METADATA: ..... '
    all_triples = []
    global last_RatingIDNUM_in_System
    last_RatingIDNUM_in_System = 1

    #for user_key in movieLens_prefs_UserRatings:
    for user_key in sorted(movieLens_prefs_UserRatings):
        #print 'user_key:  ', user_key
        #print 'user_ALL_movie_vs_ratings: ', movieLens_prefs_UserRatings[user_key]
        user_ALL_movie_vs_ratings = movieLens_prefs_UserRatings[user_key]

        # iterate over the ratings of this user
        #if user_key == 'Jaime':
        #for movie_rating in user_ALL_movie_vs_ratings:
        for movie_rating in sorted(user_ALL_movie_vs_ratings):
            #property_URI = sourceField_2_globalSchemaURI(source_item, in_source['source_name'], in_metadata_mappings, in_metadata_content)
            #property_URI = 'https://schema.org/Rating'
            #property_URI_RatingMovie   = 'https://schema.org/RatingMovie' # harcoded because Movielens delivers a data file, no JSON or similar
            #property_URI_RatingUser    = '******'
            #property_URI_RatingLiteral = 'https://schema.org/Rating'
            property_URI_RatingMovie = in_metadata_mappings[
                Reification_RatingMovie_relation]  #'https://schema.org/RatingMovie' #
            property_URI_RatingUser = in_metadata_mappings[
                Reification_RatingUser_relation]  #'https://schema.org/RatingUser'
            property_URI_RatingLiteral = in_metadata_mappings[
                Reification_RatingNumber_relation]  #'https://schema.org/Rating'

            propertyURI_TypeOf = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'
            propertyURI_TernaryRating_USER_MOVIE_RATING = 'https://schema.org/Rating_UserMovieRating'

            URI_USER = '******' + user_key
            URI_PROPERTY_USERRATINGMOVIE_RATING_CLASS = 'URI_Property_UserRatingMovie_RATING_' + str(
                last_RatingIDNUM_in_System)

            #print 'MOVIE:                             ' + movie_rating
            #print 'USER:                              '******'URI FICTICIA USER_RATING_MOVIE:    ' + URI_PROPERTY_USERRATINGMOVIE_RATING_CLASS
            #print

            # build a single triple and append it to the others
            single_triple = []
            #<URI_Property_UserRatingMovie_RATING_1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <https://schema.org/Rating_UserMovieRating>
            single_triple.append(URI_PROPERTY_USERRATINGMOVIE_RATING_CLASS)
            single_triple.append(propertyURI_TypeOf)
            single_triple.append(propertyURI_TernaryRating_USER_MOVIE_RATING)
            all_triples.append(single_triple)

            single_triple = []
            # <URI_Property_UserRatingMovie_RATING1> <https://schema.org/Rating> <Predator_(1989)_MOVIELENS_source>
            film_URI = movie_rating.replace(
                ' ', '_')  # URIs break in SPARQL if blanks exist

            #print movie_rating[0:3]
            #print film_URI

            film_URI += '_' + in_source['source_name']
            #print film_URI
            single_triple.append(URI_PROPERTY_USERRATINGMOVIE_RATING_CLASS)
            single_triple.append(property_URI_RatingMovie)
            single_triple.append(film_URI)
            all_triples.append(single_triple)

            #<URI_Property_UserRatingMovie_RATING1> <https://schema.org/Rating> 4.5
            single_triple = []
            single_triple.append(URI_PROPERTY_USERRATINGMOVIE_RATING_CLASS)
            single_triple.append(property_URI_RatingLiteral)
            single_triple.append(user_ALL_movie_vs_ratings[movie_rating])
            all_triples.append(single_triple)

            #<URI_Property_UserRatingMovie_RATING1> <https://schema.org/Rating> <URI_MOVIELENS_USER_JAIME>
            single_triple = []
            single_triple.append(URI_PROPERTY_USERRATINGMOVIE_RATING_CLASS)
            single_triple.append(property_URI_RatingUser)
            single_triple.append(URI_USER)
            all_triples.append(single_triple)

            last_RatingIDNUM_in_System += 1
    #print all_triples
    print
    '''
    if property_URI != SOURCE_ITEM_NOT_IN_GLOBAL_SCHEMA:     
        # build single tripple and append it with the others
        single_triple = []
        film_URI = in_film_to_search.replace(' ', '_') # URI crashes in SPARQL is blanks exist
        film_URI += '_' + in_source['source_name']
        single_triple.append(film_URI)
        single_triple.append(property_URI)
        single_triple.append(source_item_value)
        all_triples.append(single_triple)
        # all_triples contains all triples (each is lists of 3 elements) from the HTTP source

        '''
    #print 'PRINTING ALL RESULTING TRIPLES'
    #print all_triples
    #print all_triples[1]
    #for i in range(0,len(all_triples)):
    #    print all_triples[i][0] + ' - ' + all_triples[i][1] + ' - ' + all_triples[i][2]

    # if movie was not found, return empty list
    return all_triples
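
For each (user, movie) rating, the loop above emits a group of four triples that reify the rating; following the inline comments, the group for the first rating looks roughly like this (the predicate URIs come from the metadata mappings, so this is only a sketch):

# URI_Property_UserRatingMovie_RATING_1  rdf:type                    https://schema.org/Rating_UserMovieRating
# URI_Property_UserRatingMovie_RATING_1  <RatingMovie mapping URI>   Predator_(1989)_MOVIELENS_source
# URI_Property_UserRatingMovie_RATING_1  <RatingNumber mapping URI>  4.5
# URI_Property_UserRatingMovie_RATING_1  <RatingUser mapping URI>    <user URI>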
Example #16
# Close the opened file
fo.close()

fo = open(pathfile, "r+")

strRead = fo.read(30)
print(strRead)

strReada = fo.read()
print(strReada)

fo.close()
# The remove() method deletes a file; it takes the name of the file to delete as its argument.
os.remove("foo1.txt")
# The rename() method takes two arguments: the current file name and the new file name.
os.renames("foo.txt", "foo1.txt")

# The mkdir() method of the os module creates a new directory under the current directory.
# You need to pass it an argument containing the name of the directory to create.
os.mkdir("test")
# The chdir() method changes the current directory. It takes one argument: the name of the
# directory you want to make the current directory.
os.chdir("test")
# The getcwd() method shows the current working directory.
# Print the current directory
print(os.getcwd())


#os.removedirs("")


Example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pip._vendor.distlib.compat import raw_input
import os
print("test")
'''str=raw_input("Please enter: ");print("What you entered is:",str);
str1=input("Please enter");print("What I entered is:",str1);'''
fo = open("ai.py", "r", 1, "utf-8")
# open(file, mode, buffering, encoding, errors, newline, closefd, opener)
print("The opening mode of this file is:", fo.mode)

fo1 = open("testFile.py", "wb+")
fo1.close()
fo2 = open("testFile.py", "wb")
fo2.close()
fo3 = open("ai.py", "a+", 1, "utf-8")
fo3.write("I am the appended data")  # append the data to the end of the file
str = fo3.read(1)
position = fo3.tell()
print("The file content I read is the string:", str)
print("Current file position:", position)
position = fo3.seek(0, 0)
newStr = fo3.read(10)
print("Moved the file pointer back to the beginning:", position)
# print(fo1.name,fo2.name);os.rename("ai.py","buai.py");
print("The newly read file content is:", newStr)
# os.remove("C:\\Users\\Administrator\\Desktop\\test.txt")
print(fo3.name)
print(os.getcwd())
fo3.close()