Example #1
def durum():
    """Report which tracked *.txt files changed since the last backup."""
    eskihasler = list()  # MD5 hashes of previously backed-up contents

    # Collect *.txt files from the working directory and every tracked sub-folder.
    dosyalar = ls('*.txt')
    klasor = getirklasor()
    for kdosyalar in klasor:
        for j in ls(f"{kdosyalar}/*.txt"):
            dosyalar.append(j)

    # Files under backup/içerikler/ are named after the MD5 of their content.
    icerikler = ls('backup/içerikler/*')
    for icerikk in icerikler:
        eskihasler.append(icerikk.split('\\')[-1])

    # A file is unchanged if its current MD5 matches a stored hash.
    for dosya in dosyalar:
        icerik = open(dosya).read()
        md5_hash = md5(icerik.encode('utf-8')).hexdigest()

        if md5_hash in eskihasler:
            print(f"{dosya} değişim olmamıştır")          # no change
        else:
            print(f"{dosya} değişiklik yapılmıştır ...")  # file was modified
Example #2
def validate( metadata_file_location, config = None, verbose = True ):

    #
    from os.path import isfile as exist
    from os.path import abspath,join,basename
    from os import pardir

    #
    run_dir = abspath( join( metadata_file_location, pardir ) )+'/'

    # The folder is valid if there is l=m=2 mode data in the following dirs
    status = len( ls( run_dir + '/Ylm_WEYLSCAL4::Psi4r_l2_m1_r*' ) ) > 0
    status = status or len( ls( run_dir + '/mp_WeylScal4::Psi4i_l2_m2_r*' ) ) > 0

    # Let the people know
    if not status:
        msg = 'waveform data could not be found.'
        if verbose: warning(msg,'maya.validate')

    # ignore directories with certain tags in filename
    ignore_tags = ['backup','old','archive']
    for tag in ignore_tags:
        status = status and not ( tag in run_dir )

    # ensure that file name is the same as the folder name
    a = basename(metadata_file_location).split(config.metadata_id)[0]
    b = parent(metadata_file_location)
    status = status and (  a in b  )

    #
    return status
Example #3
def show_dir(run_dir):
    logger.info('=== Listing run dir ===')
    # list() forces evaluation; a bare map() is lazy under Python 3 and would log nothing
    list(map(logger.info, ls(run_dir)))
    list(map(logger.info, ls(run_dir + '/*')))
    list(map(logger.info, ls(run_dir + '/*/*')))
    list(map(logger.info, ls(run_dir + '/*/*/*')))
    list(map(logger.info, ls(run_dir + '/*/*/*/*')))
Example #4
def show_dir(run_dir):
	print('\n=== Listing run dir ===')
	write_list(ls(run_dir))
	write_list(ls(run_dir + '/*'))
	write_list(ls(run_dir + '/*/*'))
	write_list(ls(run_dir + '/*/*/*'))
	write_list(ls(run_dir + '/*/*/*/*'))
Example #5
def show_dir(run_dir):
    print("\n=== Listing run dir ===")
    write_list(ls(run_dir))
    write_list(ls(run_dir + "/*"))
    write_list(ls(run_dir + "/*/*"))
    write_list(ls(run_dir + "/*/*/*"))
    write_list(ls(run_dir + "/*/*/*/*"))
Example #6
def show_dir(run_dir):
	print('\n=== Listing run dir ===')
	write_list(ls(run_dir))
	write_list(ls(run_dir + '/*'))
	write_list(ls(run_dir + '/*/*'))
	write_list(ls(run_dir + '/*/*/*'))
	write_list(ls(run_dir + '/*/*/*/*'))
Example #7
	def compress_sample_submission(self, dir_name, destination):
		''' Create 3 sample submissions ready to go: one with results,
		one with untrained model, and one with trained model. '''
		execution_success = 1
		zf = zipfile.ZipFile(os.path.join(self.starting_kit_dir, destination + '.zip'), mode='w')
		if destination.find('result')>=0:
			# This is a prediction result directory
			file_names = ls(os.path.join(dir_name, '*.predict'))
		else:
			# This is a code directory
			file_names = ls(os.path.join(dir_name, '*.py'))
			metadata = os.path.join(dir_name, 'metadata')
			if os.path.exists(metadata): 
				file_names = file_names + [ metadata ]
			# Add the pickle?
			if destination.find('trained')==-1:
				pickle = ls(os.path.join(dir_name, '*.pickle'))
				file_names = file_names + pickle
		print('    Compressing submission files:')
		print('	{}'.format(file_names))
		# create the zip file 
		try:
			for file_name in file_names:
				[dirnm, filenm] = os.path.split(file_name)
				# Add file to the zip file
				# first parameter file to zip, second filename in zip
				zf.write(file_name, filenm, compress_type=self.compression)
			print('[+] Success')
		except:
			print('[-] An error occurred while zipping code files: ' + dir_name)
			execution_success = 0
		finally:
			# Close the file
			zf.close() 	
		return execution_success
Example #8
def kaydet(version_ismi):
    """Snapshot every tracked *.txt file under the given version name."""
    # Collect *.txt files from the working directory and every tracked sub-folder.
    dosyalar = ls("*.txt")
    klasor = getirklasor()
    for kdosyalar in klasor:
        for j in ls(f"{kdosyalar}/*.txt"):
            dosyalar.append(j)

    print(dosyalar)
    # The version file lists one "md5,filename" pair per tracked file.
    versiyon_dosyası = open(f'backup/versiyonlar/{version_ismi}', 'w')

    for dosya_ismi in dosyalar:
        içerik = open(dosya_ismi).read()
        md5_hash = md5(içerik.encode('utf-8')).hexdigest()

        # Store the file content under its hash, then record the mapping.
        open(f'backup/içerikler/{md5_hash}', 'w').write(içerik)
        versiyon_dosyası.write(md5_hash + ',' + dosya_ismi + '\n')

    versiyon_dosyası.close()
Example #9
 def compress_competition_bundle(self, destination):
     ''' Compress the overall competition bundle. '''
     execution_success = 1
     print('    Compressing competition bundle: {}'.format(destination))
     zf = zipfile.ZipFile(destination + '.zip', mode='w')
     try:
         for dirname in ls(os.path.join(destination, '*')):
             [dirnm, filenm_] = os.path.split(dirname)
             # Add file to the zip file
             # first parameter file to zip, second filename in zip
             print('	+ Adding {}'.format(filenm_))
             zf.write(dirname, filenm_, compress_type=self.compression)
             if os.path.isdir(filenm_):
                 print('	+ Adding {} contents:'.format(dirname))
                 file_names = ls(os.path.join(dirname, '*'))
                 print(file_names)
                 for file_name in file_names:
                     print(file_name)
                     [dirnm, filenm] = os.path.split(file_name)
                     zf.write(file_name,
                              os.path.join(filenm_, filenm),
                              compress_type=self.compression)
         print('[+] Success')
     except:
         print(
             '[-] An error occurred while zipping the competition bundle: '
             + destination)
         execution_success = 0
     finally:
         # Close the file
         zf.close()
     return execution_success
Example #10
def validate(metadata_file_location, config=None, verbose=True):

    #
    from os.path import isfile as exist
    from os.path import abspath, join, basename
    from os import pardir

    #
    run_dir = abspath(join(metadata_file_location, pardir)) + '/'

    # The folder is valid if there is l=m=2 mode data in the following dirs
    status = len(ls(run_dir + '/Ylm_WEYLSCAL4::Psi4r_l2_m1_r*')) > 0
    status = status or len(ls(run_dir + '/mp_WeylScal4::Psi4i_l2_m2_r*')) > 0

    # Let the people know
    if not status:
        msg = 'waveform data could not be found.'
        if verbose: warning(msg, 'maya.validate')

    # ignore directories with certain tags in filename
    ignore_tags = ['backup', 'old', 'archive']
    for tag in ignore_tags:
        status = status and not (tag in run_dir)

    # ensure that file name is the same as the folder name
    a = basename(metadata_file_location).split(config.metadata_id)[0]
    b = parent(metadata_file_location)
    status = status and (a in b)

    #
    return status
Example #11
def copy_results(datanames, result_dir, output_dir, verbose):
    ''' This function copies all the [dataname.predict] results from result_dir to output_dir'''
    missing_files = []
    for basename in datanames:
        try:
            missing = False
            test_files = ls(result_dir + "/" + basename + "*_test*.predict")
            if len(test_files) == 0:
                vprint(verbose,
                       "[-] Missing 'test' result files for " + basename)
                missing = True
            valid_files = ls(result_dir + "/" + basename + "*_valid*.predict")
            if len(valid_files) == 0:
                vprint(verbose,
                       "[-] Missing 'valid' result files for " + basename)
                missing = True
            if missing == False:
                for f in test_files:
                    copy2(f, output_dir)
                for f in valid_files:
                    copy2(f, output_dir)
                vprint(verbose, "[+] " + basename.capitalize() + " copied")
            else:
                missing_files.append(basename)
        except:
            vprint(verbose, "[-] Missing result files")
            return datanames
    return missing_files
Example #12
    def loadData(self, data_dir=""):
        ''' Get the data from csv files.'''
        success = True
        data_reloaded = False
        vprint(
            self.verbose,
            "DataManager :: ========= Reading training data from " + data_dir)
        start = time.time()
        if self.use_pickle and self.reloadData(self.cache_file):
            # Try to reload the file from a pickle
            data_reloaded = True  # Turn "success" to false if there is a problem.
        else:
            # Load metadata
            metadata = yaml.load(open(join(data_dir, 'metadata'), 'r'),
                                 Loader=yaml.FullLoader)
            self.stride = metadata['stride']
            self.horizon = metadata['horizon']
            self.ycol0 = metadata['ycol0']
            # Load the training data data into X and t.
            data_file_list = sorted(ls(join(data_dir, "training", "*.csv")))
            vprint(self.verbose,
                   "DataManager :: ========= Load data from files:")
            vprint(self.verbose, data_file_list)
            header = np.genfromtxt(data_file_list[0],
                                   delimiter=',',
                                   max_rows=1,
                                   names=True)
            self.col_names = header.dtype.names[1:]
            for data_file in data_file_list:
                data = np.genfromtxt(data_file, delimiter=',', skip_header=1)
                self.t = np.append(self.t, data[:, 0])
                if self.X.shape[0] == 0:
                    self.X = data[:, 1:]
                else:
                    self.X = np.append(self.X, data[:, 1:], axis=0)
            self.t0 = self.t.shape[0]
            # Append the evaluation data data to X and t.
            data_file_list = sorted(ls(join(data_dir, "evaluation", "*.csv")))
            vprint(self.verbose, data_file_list)
            for data_file in data_file_list:
                data = np.genfromtxt(data_file, delimiter=',', skip_header=1)
                self.t = np.append(self.t, data[:, 0])
                self.X = np.append(self.X, data[:, 1:], axis=0)

        if self.use_pickle and not data_reloaded:
            # Save data as a pickle for "faster" later reload
            self.saveData(self.cache_file)

        end = time.time()
        if len(self.X) == 0:
            success = False
            vprint(self.verbose, "[-] Loading failed")
        else:
            vprint(
                self.verbose, "[+] Success, loaded %d samples in %5.2f sec" %
                (self.t.shape[0], end - start))
        self.resetTime()
        return success
Example #13
def show_io(input_dir, output_dir):
    logger.info('=== DIRECTORIES ===')
    # Show this directory
    logger.info("-- Current directory " + pwd() + ":")
    # list() forces evaluation; a bare map() is lazy under Python 3 and would log nothing
    list(map(logger.info, ls('.')))
    list(map(logger.info, ls('./*')))
    list(map(logger.info, ls('./*/*')))

    # List input and output directories
    logger.info("-- Input directory " + input_dir)
    list(map(logger.info, ls(input_dir)))
    list(map(logger.info, ls(input_dir + '/*')))
    list(map(logger.info, ls(input_dir + '/*/*')))
    list(map(logger.info, ls(input_dir + '/*/*/*')))

    logger.info("-- Output directory  " + output_dir)
    list(map(logger.info, ls(output_dir)))
    list(map(logger.info, ls(output_dir + '/*')))

    # write metadata to stderr
    logger.info('=== METADATA ===')
    logger.info("-- Current directory " + pwd() + ":")
    try:
        metadata = yaml.load(open('metadata', 'r'), Loader=yaml.FullLoader)
        for key, value in metadata.items():
            logger.info(key + ': ' + str(value))
    except:
        logger.info("none")
    logger.info("-- Input directory " + input_dir + ":")
    try:
        metadata = yaml.load(open(os.path.join(input_dir, 'metadata'), 'r'),
                             Loader=yaml.FullLoader)
        for key, value in metadata.items():
            logger.info(key + ': ' + str(value))
    except:
        logger.info("none")
Example #14
def image_list(library_dir):
    image_file_list = (ls("{}/{}/*/original.png".format(ROOT, library_dir)) +
                       ls("{}/{}/*/original.jpg".format(ROOT, library_dir)))
    app.logger.debug(image_file_list)
    images = []
    for f in image_file_list:
        img = {}
        img['key'] = image_key(f)
        img['thumbnail_url'] = thumbnail_file(f)
        images.append(img)
    return render_template('image_list.html',
                           title='Avians ' + os.path.basename(ROOT),
                           library_dir=library_dir,
                           images=images)
Example #15
def validate( metadata_file_location, config = None ):

    #
    from os.path import isfile as exist
    from os.path import abspath,join,basename
    from os import pardir

    #
    run_dir = abspath( join( metadata_file_location, pardir ) )+'/'

    # The folder is valid if there is l=m=2 mode data in the following dirs
    status = len( ls( run_dir + '/Psi4ModeDecomp/psi3col*l2.m2.gz' ) ) > 0

    # ignore directories with certain tags in filename
    ignore_tags = ['backup','old']
    for tag in ignore_tags:
        status = status and not ( tag in run_dir )

    #
    a = basename(metadata_file_location).split(config.metadata_id)[0]
    b = parent(metadata_file_location)
    status = status and (  a in b  )

    #
    return status
Example #16
 def compress_data(self, dir_name, destination):
     ''' Compress data files in AutoML split format. '''
     execution_success = 1
     file_names = ls(os.path.join(dir_name, '*.*'))
     print('    Compressing data files:')
     print('	{}'.format(file_names))
     # create zip files for input_data and reference data
     z_input = zipfile.ZipFile(os.path.join(destination, 'input_data.zip'),
                               mode='w')
     z_ref1 = zipfile.ZipFile(os.path.join(destination,
                                           'reference_data_1.zip'),
                              mode='w')
     z_ref2 = zipfile.ZipFile(os.path.join(destination,
                                           'reference_data_2.zip'),
                              mode='w')
     try:
         for file_name in file_names:
             [dirnm, filenm] = os.path.split(file_name)
             # Add file to the zip file
             # first parameter file to zip, second filename in zip
             if filenm.find('valid.solution') == -1 and filenm.find(
                     'test.solution') == -1 and filenm.find(
                         'private.info') == -1:
                 #print('Add {} to input'.format(filenm))
                 z_input.write(file_name,
                               filenm,
                               compress_type=self.compression)
             if filenm.find('public.info') >= 0:
                 #print('Add {} to refs'.format(filenm))
                 z_ref1.write(file_name,
                              filenm,
                              compress_type=self.compression)
                 z_ref2.write(file_name,
                              filenm,
                              compress_type=self.compression)
             if filenm.find('valid.solution') >= 0:
                 #print('Add {} to ref1'.format(filenm))
                 z_ref1.write(file_name,
                              filenm,
                              compress_type=self.compression)
             if filenm.find('test.solution') >= 0:
                 #print('Add {} to ref2'.format(filenm))
                 z_ref2.write(file_name,
                              filenm,
                              compress_type=self.compression)
         self.starting_kit_files += [
             'sample_code_submission.zip', 'sample_result_submission.zip',
             'sample_trained_submission.zip'
         ]
         print('[+] Success')
     except:
         print('[-] An error occurred while zipping data files: ' +
               dir_name)
         execution_success = 0
     finally:
         # Close the files
         z_input.close()
         z_ref1.close()
         z_ref2.close()
     return execution_success
Example #17
def validate(metadata_file_location, config=None, verbose=False):

    #
    from os.path import isfile as exist
    from os.path import abspath, join, basename, getsize
    from os import pardir

    #
    run_dir = abspath(join(metadata_file_location, pardir)) + '/'

    # The folder is valid if there is l=m=2 mode data in the following dirs
    min_file_list = ls(run_dir + '/Psi4ModeDecomp/psi3col*l2.m2.gz')
    status = len(min_file_list) > 0

    # Ensure that the data file is non-empty
    status = getsize(min_file_list[0]) > 25 if status else False

    # ignore directories with certain tags in filename
    ignore_tags = ['backup', 'old']
    for tag in ignore_tags:
        status = status and not (tag in run_dir)

    #
    a = basename(metadata_file_location).split(config.metadata_id)[0]
    b = parent(metadata_file_location)
    status = status and (a in b)

    #
    return status
Example #18
 def compress_starting_kit(self, destination):
     ''' Compress relevant directories and files from the starting kit. '''
     execution_success = 1
     print('    Compressing starting kit files:')
     print('	{}'.format(self.starting_kit_files))
     zf = zipfile.ZipFile(os.path.join(destination, 'starting_kit.zip'),
                          mode='w')
     try:
         for filenm_ in self.starting_kit_files:
             # Add file to the zip file
             # first parameter file to zip, second filename in zip
             dirname = os.path.join(self.starting_kit_dir, filenm_)
             #print('	+ Adding {}'.format(dirname))
             zf.write(dirname, filenm_, compress_type=self.compression)
             if os.path.isdir(dirname):
                 #print('	+ Adding {} contents:'.format(dirname))
                 file_names = ls(os.path.join(dirname, '*'))
                 #print(file_names)
                 for file_name in file_names:
                     if (file_name.find('__pycache__') == -1
                             and file_name.find('.pyc') == -1):
                         #print(file_name)
                         [dirnm, filenm] = os.path.split(file_name)
                         zf.write(file_name,
                                  os.path.join(filenm_, filenm),
                                  compress_type=self.compression)
         print('[+] Success')
     except:
         print('[-] An error occurred while zipping starting kit files: ' +
               self.starting_kit_dir)
         execution_success = 0
     finally:
         # Close the file
         zf.close()
     return execution_success
Example #19
 def compress_code(self, dir_name, destination):
     ''' Compress all '.py' files in dir_name. Add metadata if it exists. '''
     execution_success = 1
     file_names = ls(os.path.join(dir_name, '*.py'))
     metadata = os.path.join(dir_name, 'metadata')
     if os.path.exists(metadata):
         file_names = file_names + [metadata]
     metric = os.path.join(dir_name, 'metric.txt')
     if os.path.exists(metric):
         file_names = file_names + [metric]
     print('    Compressing code files:')
     print('	{}'.format(file_names))
     # create the zip file
     [dirnm, filenm] = os.path.split(dir_name)
     zf = zipfile.ZipFile(os.path.join(destination, filenm + '.zip'),
                          mode='w')
     try:
         for file_name in file_names:
             [dirnm, filenm] = os.path.split(file_name)
             # Add file to the zip file
             # first parameter file to zip, second filename in zip
             zf.write(file_name, filenm, compress_type=self.compression)
         print('[+] Success')
     except:
         print('[-] An error occurred while zipping code files: ' +
               dir_name)
         execution_success = 0
     finally:
         # Close the file
         zf.close()
     return execution_success
Example #20
	def compress_data(self, dir_name, destination):
		''' Compress data files in AutoML split format. '''
		execution_success = 1
		file_names = ls(os.path.join(dir_name, '*'))
		print('    Compressing data files:')
		print('	{}'.format(file_names))
		# create zip files for training_data and evaluation_data
		z_training= zipfile.ZipFile(os.path.join(destination, 'training_data.zip'), mode='w')
		z_evaluation = zipfile.ZipFile(os.path.join(destination, 'evaluation_data.zip'), mode='w')
		try:
			for file_name in file_names:
				[dirnm, filenm] = os.path.split(file_name)
				# Add file to the zip file
				# first parameter file to zip, second filename in zip
				z_evaluation.write(file_name, filenm, compress_type=self.compression)
				zip_dir(z_evaluation, file_name, compress_type=self.compression) # zip everything
				z_training.write(file_name, filenm, compress_type=self.compression)
				if filenm.find('training')==0:  
					zip_dir(z_training, file_name, compress_type=self.compression) # zip only the training directory
			self.starting_kit_files += ['sample_code_submission.zip'] # No 'sample_result_submission.zip', 'sample_trained_submission.zip'
			print('[+] Success')          	
		except:
			print('[-] An error occurred while zipping data files: ' + dir_name)
			execution_success = 0
		finally:
			# Close the files
			z_evaluation.close()
			z_training.close()
		return execution_success
Example #21
def inventory_data(input_dir):
    training_names = ls(input_dir + '/*/*_train.data')
    for i in range(0,len(training_names)):
        name = training_names[i]
        # keep just the basename, up to the last '_' (drops the '_train.data' suffix)
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        check_dataset(os.path.join(input_dir, training_names[i]), training_names[i])
    return training_names
Example #22
def inventory_data(input_dir):
    training_names = ls(input_dir + '/*/*_train.data') # This supports subdirectory structures obtained by concatenating bundles
    for i in range(0,len(training_names)):
        name = training_names[i]
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        check_dataset(os.path.join(input_dir, training_names[i]), training_names[i])
    return training_names
Example #23
def inventory_data_nodir(input_dir):
    ''' Inventory data, assuming flat directory structure'''
    training_names = ls(os.path.join(input_dir, '*_train.data'))
    for i in range(0,len(training_names)):
        name = training_names[i]
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        check_dataset(input_dir, training_names[i])
    return training_names
Example #24
def home():
    raw_root_list = ls("{}/*".format(ROOT))
    app.logger.debug("raw_root_list: {}".format(raw_root_list))
    root_list = [
        os.path.basename(r) for r in raw_root_list if os.path.isdir(r)
    ]
    app.logger.debug("root_list: {}".format(root_list))
    return render_template('home.html', dirs=root_list)
Example #25
def inventory_data_nodir(input_dir):
	# THIS IS THE OLD STYLE WITH NO SUB-DIRECTORIES
    training_names = ls(os.path.join(input_dir, '*_train.data'))
    for i in range(0,len(training_names)):
        name = training_names[i]
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        check_dataset(input_dir, training_names[i])
    return training_names
Example #26
def check_xml():
  for directory in ("xml",):
    for file in ls('../%s/*.xml' % directory):
      try:
        print(file)
        parse(file, ContentHandler())
      except:
        raise
Example #27
def inventory_data_dir(input_dir):
    ''' Inventory data, assuming a directory hierarchy (one sub-directory per dataset bundle)'''
    training_names = ls(input_dir + '/*/*_train.data') # This supports subdirectory structures obtained by concatenating bundles
    for i in range(0,len(training_names)):
        name = training_names[i]
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        check_dataset(os.path.join(input_dir, training_names[i]), training_names[i])
    return training_names
Example #28
def inventory_data_dir(input_dir):
    ''' Inventory data, assuming a directory hierarchy (one sub-directory per dataset bundle)'''
    training_names = ls(input_dir + '/*/*_train1.data') # This supports subdirectory structures obtained by concatenating bundles
    for i in range(0,len(training_names)):
        name = training_names[i]
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        #check_dataset(os.path.join(input_dir, training_names[i]), training_names[i])
    return training_names
Example #29
def test_labelmap_to_dir():
    lmfs = ls(os.path.expandvars('$HOME/Annex/Arabic/working-set-1/*/labelmap.lm2'))
    assert len(lmfs) > 0, str(lmfs)
    for lmf in lmfs:
        lmdir = '/tmp/{}/'.format(os.path.basename(lmf))
        vs.labelmap_to_dir(lmf, lmdir)
        assert os.path.exists(lmdir)
        assert os.path.exists(os.path.join(lmdir, 'labels.txt'))
Example #30
def inventory_data_nodir(input_dir):
    ''' Inventory data, assuming flat directory structure'''
    training_names = ls(os.path.join(input_dir, '*_train.data'))
    for i in range(0,len(training_names)):
        name = training_names[i]
        training_names[i] = name[-name[::-1].index(filesep):-name[::-1].index('_')-1]
        check_dataset(input_dir, training_names[i])
    return training_names
Example #31
def unique_file(p):
    "Returns the path for original image file in given lib dir and image key"
    fl = ls(p)
    if len(fl) == 0:
        raise Exception("No such file found: {}".format(p))
    if len(fl) > 1:
        raise Exception("More than 1 file found: {}".format(str(fl)))
    else:
        return fl[0]
Example #32
def getirklasor():
    """Return every sub-directory of the working directory except 'backup'."""
    klasorler = list()

    dizinler = ls("*")
    dizinler.remove("backup")  # the backup store itself is not tracked
    for klasor in dizinler:
        if os.path.isdir(klasor):
            klasorler.append(klasor)
    return klasorler
Example #33
 def __init__(self, datasetpath):
     self.cachename = None
     self.log = StringIO()
     self.pool = []
     self.agency = {}
     self.topRank = 1
     for csvpath in ls(datasetpath + '/*.csv'):
         csvname = csvpath.split(".")[0].split("/")[-1]
         self.agency[csvname] = GraphAgent(ExtractGraphFromCSV(csvpath))
Example #34
    def Load(self, dirname):
        dirpath = './cache/' + dirname
        if not Path(dirpath).exists():
            self.Store(dirname)
            self.cachename = dirname
            return
        for agentcsvpath in ls(dirpath + '/*-agent.csv'):
            agentname = agentcsvpath.split('-')[-2].split('/')[-1]
            if agentname not in self.agency:
                # a plain string cannot be raised in Python 3; use a real exception
                raise RuntimeError(
                    dirname +
                    ' cache isn\'t compatible with the runtime master, missing agent for '
                    + agentname)

        # BEGIN LOADING
        self.cachename = dirname

        # load agents' rankings
        for agentcsvpath in ls(dirpath + '/*-agent.csv'):
            agentname = agentcsvpath.split('-')[-2].split('/')[-1]
            self.agency[agentname].ranking.clear()  # Flush prior ranking
            with open(agentcsvpath, 'r') as agentcsv:
                for line in csv.DictReader(agentcsv, delimiter=','):
                    interval = (float(line['Interval Start']),
                                float(line['Interval End']))
                    rank = int(line['Rank'])
                    self.agency[agentname].ranking.append((interval, rank))
            self.agency[agentname].ranking.sort(reverse=True, key=ByRank)

        # load master's pool
        self.pool.clear()
        with open(dirpath + '/master-pool.csv', 'r') as poolcsv:
            segment = []
            for line in csv.reader(poolcsv, delimiter=','):
                if "rank" in line[0]:
                    rank = int(line[0].split(' = ')[1])
                    if self.topRank < rank:
                        self.topRank = rank
                    self.pool.append((segment, rank))
                    segment = []
                else:
                    del line[-1]
                    segment.append(tuple(map(float, line)))
        self.pool.sort(reverse=True, key=ByRank)
Example #35
    def check(self):
        ''' Checks that the starting kit structure is correct and that
		the ingestion program and scoring program work.'''
        execution_success = 1
        # Verify the structure of the starting kit
        actual_starting_kit_files = set([
            os.path.basename(x)
            for x in ls(os.path.join(self.starting_kit_dir, '*'))
        ])
        desired_starting_kit_file = set(self.starting_kit_files)
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("%% CHECKS %% 1/3 %% CHECKS %% 1/3 %% CHECKS %%")
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("    Checking starting kit structure:")
        print('	{}'.format(self.starting_kit_files))
        if actual_starting_kit_files & desired_starting_kit_file != desired_starting_kit_file:
            print("[-] Failed, got:")
            print('	{}'.format(actual_starting_kit_files))
            return 0
        else:
            print("[+] Passed")
        # Add "sample_result_submission" to the list of things to deliver with the starting kit
        self.starting_kit_files.append('sample_result_submission')
        # Add an HTML version of the Jupyter notebook (unfortunately this messes up the website, so we don't do it for now)
        #print("    Creating HTML version of notebook:")
        #command_notebook = 'jupyter nbconvert --to html {} --stdout >> {}'.format(os.path.join(self.starting_kit_dir, 'README.ipynb'), os.path.join(self.html_pages, 'README.html'))
        #os.system(command_notebook)
        #print("[+] Done")
        # Create directories if they do not already exits
        if not os.path.isdir(self.sample_result_submission):
            os.mkdir(self.sample_result_submission)
        if not os.path.isdir(self.scoring_output):
            os.mkdir(self.scoring_output)
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("%% CHECKS %% 2/3 %% CHECKS %% 2/3 %% CHECKS %%")
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        # Run the ingestion program with sample data or big data
        path_ingestion = os.path.join(self.ingestion_program, 'ingestion.py')
        if big_data_dir:
            data_dir = self.big_data
        else:
            data_dir = self.sample_data
        command_ingestion = 'python {} {} {} {} {}'.format(
            path_ingestion, data_dir, self.sample_result_submission,
            self.ingestion_program, self.sample_code_submission)
        os.system(command_ingestion)
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("%% CHECKS %% 3/3 %% CHECKS %% 3/3 %% CHECKS %%")
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        # Run the scoring program
        path_scoring = os.path.join(self.scoring_program, 'score.py')
        command_scoring = 'python {} {} {} {}'.format(
            path_scoring, data_dir, self.sample_result_submission,
            self.scoring_output)
        os.system(command_scoring)
        return
Example #36
def copy_results(datanames, result_dir, output_dir, verbose):
    ''' This function copies all the [dataname.predict] results from result_dir to output_dir'''
    for basename in datanames:
        try:
            test_files = ls(result_dir + "/" + basename + "*_test*.predict")
            if len(test_files)==0: 
                vprint(verbose, "[-] Missing 'test' result files for " + basename) 
                return 0
            for f in test_files: copy2(f, output_dir)
            valid_files = ls(result_dir + "/" + basename + "*_valid*.predict")
            if len(valid_files)==0: 
                vprint(verbose, "[-] Missing 'valid' result files for " + basename) 
                return 0
            for f in valid_files: copy2(f, output_dir)
            vprint( verbose,  "[+] " + basename.capitalize() + " copied")
        except:
            vprint(verbose, "[-] Missing result files")
            return 0
    return 1
Example #37
def inventory_data(input_dir):
    '''
    :return 
        @training_names: (list) - all datasets in the input directory in alphabetical order
    '''
    training_names = ls(os.path.join(input_dir, '*.data'))
    training_names = [name.split('/')[-1] for name in training_names]
    
    if len(training_names) == 0:
        LOGGER.warning('WARNING: Inventory data - No data file found')
    return sorted(training_names)
Example #38
File: data_io.py  Project: miniproV/GP
def copy_results(datanames, result_dir, output_dir, verbose):
    ''' This function copies all the [dataname.predict] results from result_dir to output_dir'''
    missing_files = []
    for basename in datanames:
        try:
            missing = False
            test_files = ls(result_dir + "/" + basename + "*_test*.predict")
            if len(test_files) == 0:
                vprint(verbose, "[-] Missing 'test' result files for " + basename)
                missing = True
            valid_files = ls(result_dir + "/" + basename + "*_valid*.predict")
            if len(valid_files) == 0:
                vprint(verbose, "[-] Missing 'valid' result files for " + basename)
                missing = True
            if missing == False:
                for f in test_files: copy2(f, output_dir)
                for f in valid_files: copy2(f, output_dir)
                vprint(verbose, "[+] " + basename.capitalize() + " copied")
            else:
                missing_files.append(basename)
        except:
            vprint(verbose, "[-] Missing result files")
            return datanames
    return missing_files
Example #39
def show_io(input_dir, output_dir):     
	swrite('\n=== DIRECTORIES ===\n\n')
	# Show this directory
	swrite("-- Current directory " + pwd() + ":\n")
	write_list(ls('.'))
	write_list(ls('./*'))
	write_list(ls('./*/*'))
	swrite("\n")
	
	# List input and output directories
	swrite("-- Input directory " + input_dir + ":\n")
	write_list(ls(input_dir))
	write_list(ls(input_dir + '/*'))
	write_list(ls(input_dir + '/*/*'))
	write_list(ls(input_dir + '/*/*/*'))
	swrite("\n")
	swrite("-- Output directory  " + output_dir + ":\n")
	write_list(ls(output_dir))
	write_list(ls(output_dir + '/*'))
	swrite("\n")
        
    # write metadata to stderr
	swrite('\n=== METADATA ===\n\n')
	swrite("-- Current directory " + pwd() + ":\n")
	try:
		metadata = yaml.load(open('metadata', 'r'))
		for key,value in metadata.items():
			swrite(key + ': ')
			swrite(str(value) + '\n')
	except:
		swrite("none\n");
	swrite("-- Input directory " + input_dir + ":\n")
	try:
		metadata = yaml.load(open(os.path.join(input_dir, 'metadata'), 'r'))
		for key,value in metadata.items():
			swrite(key + ': ')
			swrite(str(value) + '\n')
		swrite("\n")
	except:
		swrite("none\n");
Example #40
File: compare.py  Project: lero/python
#!/usr/bin/python

import os
import sys
from glob import glob as ls

dict = {}
for file in ls('*.php'):
    if os.path.isfile('9/%s' % file):
        for line in open(file):
            if line.find('=>') != -1:
                field = line.split('=>')[0].strip()
                for anotherline in open('pt-br/%s' % file.replace('en-US','pt-BR')):
                    if anotherline.find(field) != -1:
                        dict.update({field: "".join(anotherline.split('=>')[1:])})

        newfile = open('final/%s' % file.replace('en-US','pt-BR'), 'w')
        for line in open(file):
            field = line.split('=>')[0].strip()
            if field in dict:  # dict.has_key() exists only in Python 2
                newfile.write("%s => %s" % (line.split('=>')[0], dict[field]))
            else:
                newfile.write(line)
        newfile.close()

#for file in *.php 
#do
#    for field in $( cat $file | awk '/=>/ { print $1 }' ) ; do 
#        if [ -f 9/$file ] ; then
#            novo=$( grep -w $field pt-br/${file/en-US/pt-BR} | awk -F'=>' '{print $2}')
#            #grep -w $field $file 9/$file
Example #41
File: tbtopng.py  Project: tymmej/TBtoPNG
from os.path import join
from re import match
from re import escape
from os import rename
from os import system
from glob import glob as ls  # ls() below is glob(), as in the other examples
#from numpy import unique
import numpy
import sys

Dir = "/Users/tymmej/Downloads/"
mapName = sys.argv[1]
baseDir = join(Dir, mapName)
setDir = join(baseDir, "set")

print(setDir)
listOfFiles = ls(join(setDir, "*.png"))
#print(listOfFiles)
regex = r'' + escape(mapName) + r"_(\d+)_(\d+).png"
renamePattern = join(setDir, regex)

firstNumList = []
for item in listOfFiles:
    filePath = join(setDir, item)
    r = match(renamePattern, item)

    if not(r is None):
        firstNum = '%05d' % int(r.group(1))
        secondNum = '%05d' % int(r.group(2))

        firstNumList.append(firstNum)
Example #42
import os
import sys
from glob import glob as ls

total = 0
queue = {}
aicm = ['active','incoming','maildrop','deferred']

def dirwalk(dir):
    "walk a directory tree, using a generator"
    for f in os.listdir(dir):
        fullpath = os.path.join(dir,f)
        if os.path.isdir(fullpath) and not os.path.islink(fullpath):
            for x in dirwalk(fullpath):  # recurse into subdir
                yield x
        else:
            yield fullpath


for postfix in ls('/var/spool/postfix*'):
    name = os.path.basename(postfix)
    if len(sys.argv) == 2 and name != sys.argv[1]:
        continue
    mails = 0
    for directory in aicm:
        mails = mails + len([ msgid for msgid in dirwalk("%s/%s" % (postfix,directory))])
    queue[name] = mails
    total = queue[name] + total
    print "[ \033[34m\033[1m%s\033[0m ]" % postfix
    print " \033[36m\033[1m %s \033[0m messages on queue." % queue[name]
print "\n\033[34m\033[1m:: \033[0m\033[36m\033[1m%s\033[0m\033[34m\033[1m total messages\033[0m" % total
Example #43
def learn_metadata( metadata_file_location ):

    # Try to load the related par file as well as the metadata file
    par_file_location = metadata_file_location[:-3]+'par'
    raw_metadata = smart_object( [metadata_file_location,par_file_location] )

    # shorthand
    y = raw_metadata

    # # Useful for debugging -- show what's in y
    # y.show()

    #
    standard_metadata = smart_object()
    # shorthand
    x = standard_metadata

    # Keep NOTE of important information
    x.note = ''

    # Creation date of metadata file
    x.date_number = getctime(  metadata_file_location  )

    '''
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Calculate derivative quantities  %%
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    # Masses
    x.m1 = y.mass1
    x.m2 = y.mass2

    # NOTE that some bbh files may not have after_junkradiation_spin data (i.e. empty). In these cases we will take the initial spin data
    S1 = array( [ y.after_junkradiation_spin1x, y.after_junkradiation_spin1y, y.after_junkradiation_spin1z ] )
    S2 = array( [ y.after_junkradiation_spin2x, y.after_junkradiation_spin2y, y.after_junkradiation_spin2z ] )

    #%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%#
    # NOTE that sometimes the afterjunk spins may not be stored correctly or at all in the bbh files. Therefore an additional validation step is needed here.
    S1bool = S1.astype(list).astype(bool)
    S2bool = S2.astype(list).astype(bool)
    x.isafterjunk = S1bool.all() and S2bool.all()
    #%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%#

    # If the data is to be stored using afterjunk parameters:
    if x.isafterjunk:
        #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
        # Use afterjunk information                   #
        msg = cyan('Initial parameters corresponding to the bbh file\'s afterjunk time will be used to populate metadata.')
        alert(msg,'bam.py')
        x.note += msg
        #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
        # find puncture data locations
        puncture_data_1_location = ls( parent( metadata_file_location )+ 'moving_puncture_integrate1*' )[0]
        puncture_data_2_location = ls( parent( metadata_file_location )+ 'moving_puncture_integrate2*' )[0]

        # load puncture data
        puncture_data_1,_ = smart_load( puncture_data_1_location )
        puncture_data_2,_ = smart_load( puncture_data_2_location )

        # Mask away the initial junk region using the after-junk time given in the bbh metadata
        after_junkradiation_time = y.after_junkradiation_time
        after_junkradiation_mask = puncture_data_1[:,-1] > after_junkradiation_time

        puncture_data_1 = puncture_data_1[ after_junkradiation_mask, : ]
        puncture_data_2 = puncture_data_2[ after_junkradiation_mask, : ]

        R1 = array( [  puncture_data_1[0,0],puncture_data_1[0,1],puncture_data_1[0,2],  ] )
        R2 = array( [  puncture_data_2[0,0],puncture_data_2[0,1],puncture_data_2[0,2],  ] )

        # NOTE that here the shift is actually contained within puncture_data, and NOTE that the shift is -1 times the velocity
        P1 = x.m1 * -array( [  puncture_data_1[0,3],puncture_data_1[0,4],puncture_data_1[0,5],  ] )
        P2 = x.m2 * -array( [  puncture_data_2[0,3],puncture_data_2[0,4],puncture_data_2[0,5],  ] )
    else:
        #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
        # Use initial data information                #
        msg = cyan('Warning:')+yellow(' The afterjunk spins appear to have been stored incorrectly. All parameters according to the initial data (as stored in the bbh files) will be stored. ')
        warning(msg,'bam.py')
        x.note += msg
        #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
        # Spins
        S1 = array( [ y.initial_bh_spin1x, y.initial_bh_spin1y, y.initial_bh_spin1z ] )
        S2 = array( [ y.initial_bh_spin2x, y.initial_bh_spin2y, y.initial_bh_spin2z ] )
        # Momenta
        P1 = array( [ y.initial_bh_momentum1x, y.initial_bh_momentum1y, y.initial_bh_momentum1z ] )
        P2 = array( [ y.initial_bh_momentum2x, y.initial_bh_momentum2y, y.initial_bh_momentum2z ] )
        # positions
        R1 = array( [ y.initial_bh_position1x, y.initial_bh_position1y, y.initial_bh_position1z ] )
        R2 = array( [ y.initial_bh_position2x, y.initial_bh_position2y, y.initial_bh_position2z ] )


    # Estimate the component angular momenta
    try:
        L1 = cross(R1,P1)
        L2 = cross(R2,P2)
    except:
        error('There was an insurmountable problem encountered when trying to load initial binary configuration. For example, %s. The guy at the soup shop says "No soup for you!!"'%red('P1 = '+str(P1)))

    # Extract and store the initial adm energy
    x.madm = y.initial_ADM_energy

    # Store the initial linear momenta
    x.P1 = P1; x.P2 = P2
    x.S1 = S1; x.S2 = S2

    # Estimate the initial binary separation (afterjunk), and warn the user if this value is significantly different from the value stored in the bbh file
    x.b = norm(R1-R2) # float( y.initial_separation )
    if abs( y.initial_separation - norm(R1-R2) ) > 1e-1:
        msg = cyan('Warning:')+' The estimated after junk binary separation is significantly different from the value stored in the bbh file: '+yellow('x from calculation = %f, x from bbh file=%f' % (norm(R1-R2),y.initial_separation) )+'. The user should understand whether this is an error or not.'
        x.note += msg
        warning(msg,'bam.py')
    # Let the user know that the binary separation is possibly bad
    if x.b<4:
        msg = cyan('Warning:')+' The estimated initial binary separation is very small. This may be due to an error in the puncture data. You may wish to use the initial binary separation from the bbh file which is %f'%y.initial_separation+'. '
        warning(msg,'bam.py')
        x.note += msg

    #
    x.R1 = R1; x.R2 = R2

    #
    x.L1 = L1; x.L2 = L2

    #
    x.valid = True

    # Load irriducible mass data
    irr_mass_file_list = ls(parent(metadata_file_location)+'hmass_2*gz')
    if len(irr_mass_file_list)>0:
        irr_mass_file = irr_mass_file_list[0]
        irr_mass_data,mass_status = smart_load(irr_mass_file)
    else:
        mass_status = False
    # Load spin data
    spin_file_list = ls(parent(metadata_file_location)+'hspin_2*gz')
    if len(spin_file_list)>0:
        spin_file = spin_file_list[0]
        spin_data,spin_status = smart_load(spin_file)
    else:
        spin_status = False
    # Estimate final mass and spin
    if mass_status and spin_status:
        Sf = spin_data[-1,1:]
        irrMf = irr_mass_data[-1,1]
        x.__irrMf__ = irrMf
        irrMf_squared = irrMf**2
        Sf_squared = norm(Sf)**2
        x.mf = sqrt( irrMf_squared + Sf_squared / (4*irrMf_squared) ) / (x.m1+x.m2)
        #
        x.Sf = Sf
        x.Xf = x.Sf/(x.mf*x.mf)
        x.xf = sign(x.Sf[-1])*norm(x.Sf)/(x.mf*x.mf)
    else:
        from numpy import nan
        x.Sf = nan*array([0.0,0.0,0.0])
        x.Xf = nan*array([0.0,0.0,0.0])
        x.mf = nan
        x.xf = nan

    #
    return standard_metadata, raw_metadata
Example #44
def learn_metadata( metadata_file_location ):

    #
    thisfun = 'maya.learn_metadata'

    # Look for stdout files
    stdout_file_list = sorted( ls( parent(metadata_file_location)+'/stdout*' ) )
    if not stdout_file_list:
        msg = 'cannot find stdout files which contain important metadata'
        error(msg,'maya.learn_metadata')
    # Look for ShiftTracker files
    shift_tracker_file_list = ls( parent(metadata_file_location)+'/Shift*' )
    if not shift_tracker_file_list:
        msg = 'cannot find ShiftTracker* files which contain information about binary dynamics'
        error(msg,'maya.learn_metadata')
    # Look for Horizon mass and spin files
    hn_file_list = ls( parent(metadata_file_location)+'/hn*' )
    if not hn_file_list:
        msg = 'cannot find hn_masspin files which contain information about remnant BH final state'
        error(msg,'maya.learn_metadata')

    # Use the first file returned by the OS
    # NOTE that this file is needed to get the component and net ADM masses
    stdout_file_location = stdout_file_list[0]

    # Learn the par file
    raw_metadata = smart_object( metadata_file_location )

    # Shorthand
    y = raw_metadata

    # Retrieve ADM masses from the stdout file
    y.learn_string( grep( 'm+', stdout_file_location )[0] )
    y.learn_string( grep( 'm-', stdout_file_location )[0] )
    y.learn_string( grep( 'ADM mass from r', stdout_file_location )[0] )

    # # Useful for debugging -- show what's in y
    # y.show()

    # Create smart_object for the standard metadata
    standard_metadata = smart_object()
    # shorthand
    x = standard_metadata

    # Creation date of metadata file
    x.date_number = getctime(  metadata_file_location  )

    '''
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    %% Calculate derivative quantities  %%
    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    '''

    # Masses
    x.m1 = getattr(y,'INFO(TwoPunctures):ADMmassforpuncture:m+')
    x.m2 = getattr(y,'INFO(TwoPunctures):ADMmassforpuncture:m_')

    #
    P1 = array( [ getattr(y,'twopunctures::par_P_plus[0]'),
                  getattr(y,'twopunctures::par_P_plus[1]'),
                  getattr(y,'twopunctures::par_P_plus[2]') ] )
    P2 = array( [ getattr(y,'twopunctures::par_P_minus[0]'),
                  getattr(y,'twopunctures::par_P_minus[1]'),
                  getattr(y,'twopunctures::par_P_minus[2]') ] )

    #
    S1 = array( [ getattr(y,'twopunctures::par_s_plus[0]'),
                  getattr(y,'twopunctures::par_s_plus[1]'),
                  getattr(y,'twopunctures::par_s_plus[2]') ] )
    S2 = array( [ getattr(y,'twopunctures::par_s_minus[0]'),
                  getattr(y,'twopunctures::par_s_minus[1]'),
                  getattr(y,'twopunctures::par_s_minus[2]') ] )

    # Read initial locations from the ShiftTracker files
    def shiftt_to_initial_bh_location(key):
        shiftt0_file_location = [ f for f in shift_tracker_file_list if key in f ][0]
        fid = open( shiftt0_file_location )
        return array( [ float(a) for a in fid.readline().replace('\n','').split('\t')][2:5] )
    R1 = shiftt_to_initial_bh_location("ShiftTracker0")
    R2 = shiftt_to_initial_bh_location("ShiftTracker1")

    # Find initial binary separation for convenience
    x.b = norm(R1-R2)

    #
    x.note = 'Binary parameters correspond to initial data, not an after-junk point.'

    # Estimate the component angular momenta
    L1 = cross(R1,P1)
    L2 = cross(R2,P2)

    # Extract and store the initial adm energy
    x.madm = getattr(y,'INFO(TwoPunctures):ADMmassfromr')[-1]

    # Store the initial linear momenta
    x.P1 = P1; x.P2 = P2
    x.S1 = S1; x.S2 = S2

    # Estimate the initial binary separation
    x.b = norm(R1-R2)

    #
    x.R1 = R1; x.R2 = R2

    #
    x.L1 = L1; x.L2 = L2

    #
    L = L1+L2
    S = S1+S2
    x.L = L
    x.J = L+S

    # Load Final Mass and Spin Data  hn_mass_spin_2
    hn_file_bin = [ f for f in hn_file_list if 'hn_mass_spin_2' in f ]
    proceed = len(hn_file_bin)==1
    hn_file = hn_file_bin[0] if proceed else None
    nan_remnant_data = array( [ nan,nan,nan,nan,nan ] )

    #
    if not proceed:
        #
        msg = 'The default hn_mass_spin_2 file could not be found. Place-holders (i.e. nans) for the remnant information will be passed.'
        warning(msg,thisfun)
        x.note += ' ' + msg
        remnant_data = nan_remnant_data
    else:
        # Use bash's tail to get the last row in the file
        cmd = 'tail -n 1 %s' % hn_file
        data_string = bash(cmd)
        # If the string is empty, then there was an error
        if not data_string:
            msg = 'The system failed using tail to get the remnant state from \"%s\"'%cyan(cmd)
            error(msg,thisfun)
        # Else, parse the data string into floats
        remnant_data = [ float(v) for v in data_string.replace('\n','').split('\t') ]
        # Handle formatting cases
        if len(remnant_data) != 5:
            msg = 'Remnant data was loaded, but its format is unexpected (last row length is %s). Placeholders for the remnant information will be passed.' % yellow( str(len(remnant_data)) )
            warning(msg,thisfun)
            x.note += ' ' + msg
            remnant_data = nan_remnant_data

    # Unpack the remnant data
    [tf,Mf,xfx,xfy,xfz] = remnant_data
    # Store related final mass and spin data
    x.mf = Mf
    x.Sf = Mf*Mf*array([xfx,xfy,xfz])
    x.Xf = array([xfx,xfy,xfz])
    x.xf = sign(x.Sf[-1])*norm(x.Sf)/(x.mf*x.mf)

    # Store relaxed (after-junk) fields
    x.S1_afterjunk,x.S2_afterjunk,x.S_afterjunk = None,None,None
    x.L1_afterjunk,x.L2_afterjunk,x.L_afterjunk = None,None,None
    x.R1_afterjunk,x.R2_afterjunk = None,None
    x.P1_afterjunk,x.P2_afterjunk = None,None
    x.J_afterjunk = None

    #
    x.valid = True

    #
    return standard_metadata, raw_metadata
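
Note on the recurring ls() call: most of the snippets above use ls without importing it. As Examples #40 and #42 show, ls is simply glob.glob under an alias, and helpers such as write_list, swrite, vprint and pwd appear to come from the surrounding projects. A minimal, self-contained sketch of the recurring directory-listing pattern (the depth levels and the '.' argument are only illustrative):

from glob import glob as ls  # the alias used throughout the examples above

def show_dir(run_dir):
    """Print the contents of run_dir, up to two levels deep."""
    print('=== Listing run dir ===')
    for pattern in ('', '/*', '/*/*'):
        for entry in ls(run_dir + pattern):
            print(entry)

if __name__ == '__main__':
    show_dir('.')  # illustrative: list the current working directory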