Example #1
 def validate(self):
     from validator import Validator
     
     v = Validator(self.ui)
     errors = v.validate()
     
     if errors:
         obj, msg = errors[0]
         QtGui.QMessageBox.critical(self, "Error", msg)
         try:
             obj.setFocus()
             obj.selectAll()
         except AttributeError:
             pass  # not every widget supports focus/selection
         return False
     else:
         it = QtGui.QTreeWidgetItemIterator(self.ui.treeWidgetFiles)
         while it.value():
             attachment = it.value().attachment
             if attachment.size > self.current_account.max_size_bytes:
                 QtGui.QMessageBox.critical(
                     self,
                     "Error",
                     "'%s' larger than %s %s" % (attachment.name,
                                                 self.current_account.max_size,
                                                 self.current_account.max_size_type))
                 return False
             it += 1
             
         return True
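For context, a minimal sketch of how a validate() hook like the one above is typically wired into a Qt dialog's accept flow; the method placement is an assumption, not from the original project.

# Illustrative only: run validate() before closing, assuming this lives
# on a QtGui.QDialog subclass that defines the validate() method above.
def accept(self):
    if self.validate():
        QtGui.QDialog.accept(self)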
Example #2
def main(schema=None, output_dir=None, config_path=None):
    """
    Validate the schema file and output directory parameters.
    Build a tree from the schema file.
    Walk the tree, calling the registered callbacks on each node.
    """

    validator = Validator(schema, output_dir=output_dir)
    if not validator.validate():
        click.echo(validator.error['msg'])
        sys.exit(1)

    directory_tree = Tree(
        indent_size = validator.indent_size,
        output_dir  = output_dir,
        base_path   = os.path.abspath(os.curdir)
    )
    directory_tree.load_data(schema)
    directory_tree.build_tree()

    callbacks = [pprint_node]

    if config_path:
        process_hooks = make_config_processor(config_path)
        callbacks.append(process_hooks)

    directory_tree.walk(callbacks=callbacks)
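A hedged sketch of how main() might be exposed as a CLI, since it already uses click.echo; the command and option names here are assumptions, not taken from the original project.

# Hypothetical click wiring for main() above.
import click

@click.command()
@click.argument('schema', type=click.Path(exists=True))
@click.option('--output-dir', default='.', type=click.Path())
@click.option('--config-path', default=None, type=click.Path())
def cli(schema, output_dir, config_path):
    main(schema=schema, output_dir=output_dir, config_path=config_path)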
Example #3
 def testPoorlyPlacedTitle(self):
     with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
         self.buffer.write('\n\n\t\t\t\t<div></div><h1>Person v. Person - 100 U.S. 25 (2000)</h1>')
     v = Validator('buffer.txt')
     with self.assertRaises(BadTitle, msg='Validator passed a title that was not at the '
                            'beginning of the file.'):
         v.validateTitlePlacement()
Example #4
    def test_and(self):
        def always_true(data):
            return True

        def len_lt_7(data):
            return len(data) < 7

        def always_false(data):
            return False

        lt7_validator = Validator(And(always_true, len_lt_7))
        self.assertTrue(lt7_validator.validate('hello').isEmpty())
        errorbucket = lt7_validator.validate('solongmorethan7')
        self.assertEqual(
            errorbucket.errors,
            {'func_fail': {'': FuncFail(len_lt_7, 'solongmorethan7')}})
        lt7_falsy_validator = Validator(And(always_true, always_false,
                                            len_lt_7))
        errorbucket = lt7_falsy_validator.validate('solongmorethan7')
        self.assertEqual(errorbucket.errors, {
            'func_fail': {
                '': OrderedList(FuncFail(len_lt_7, 'solongmorethan7'),
                                FuncFail(always_false, 'solongmorethan7'))
            }
        })
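The test above implies an And combinator that runs each predicate in order and records a FuncFail for every one that rejects the data. A minimal sketch of that idea, assuming nothing about the real Validator internals; the class and method names are illustrative only.

class And:
    """Combine predicates; all must accept the data (illustrative sketch)."""
    def __init__(self, *funcs):
        self.funcs = funcs

    def failing_funcs(self, data):
        # Every predicate that returned a falsy value, in declaration order.
        return [f for f in self.funcs if not f(data)]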
Example #5
 def testBadTitleNumber(self):
     with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
         self.buffer.write('<h1>Foo v. Bar - U.S. 200 (2013)</h1>')
     v = Validator('buffer.txt')
     with self.assertRaises(BadTitle, msg='Validator passed a title containing an improperly '
                      'formatted case number.'):
         v.validateTitleParts()
Example #6
 def testIdentifyCaseGroup(self):
     with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
         self.buffer.write('\t\t\t<h1>Group of Cases - 100 U.S. 200 (2013)</h1>\t\t\t')
     v = Validator('buffer.txt')
     with self.assertRaises(GroupedCase, msg='Validator failed to identify a group of cases'
                            ' as such.'):
         v.validateTitleParts()
Example #7
def main():
    import argparse

    parser = argparse.ArgumentParser(
        description='Check for common mistakes in LaTeX documents.')

    parser.add_argument('filenames', action='append',
                        help='List of filenames to check')

    args = parser.parse_args()

    # Count the total number of errors
    num_errors = 0

    for fname in args.filenames:
        with open(fname, 'r') as infile:
            validator = Validator()
            for lineno, line in enumerate(infile):
                for rule, span in validator.validate(line):
                    print_warning(fname, lineno, line.strip(), span, rule, args)
                    num_errors += 1

    if num_errors > 0:
        print('\nTotal of {0} mistakes found.'.format(num_errors))
        return 1
    else:
        print('No mistakes found.')
        return 0
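print_warning is referenced above but not defined in this snippet; a plausible minimal version, assuming span is a (start, end) character range within the stripped line. The args parameter is accepted but unused here, matching the call site.

def print_warning(fname, lineno, line, span, rule, args):
    start, end = span
    # Report the location, the offending line, and a caret marker under it.
    print('{0}:{1}: {2}'.format(fname, lineno + 1, rule))
    print('    ' + line)
    print('    ' + ' ' * start + '^' * max(1, end - start))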
Example #8
    def are_fields_valid(self, f_def):
        """Returns true if the hash of field definitions are all valid."""
        if not f_def:
            return True
        if not Validator:
            logging.warning("Can't validate data: python-validator not installed.")
            return True
        data = self.field_data(f_def)
        errors = None
        field_list = []
        # Translate to xsd-style syntax by transposing
        # the name into the hash from the key.
        for name, field in f_def.items():
            field['name'] = name
            field['type'] = field.get('type', 'string')
            field_list.append(field)
        validator = Validator({'root': field_list})
        # The try/except wrapper was disabled in the original; kept here
        # commented out, with the syntax updated for Python 3:
        # try:
        #     errors = validator.validate(data)
        # except Exception as error:
        #     logging.warning("Couldn't validate page, skipping validation: %s" % error)
        errors = validator.validate(data)
        # Collect the errors and report on which fields failed
        if errors:
            return [name for name, errs in errors.items() if errs]
        # Otherwise we pass!
        return True
Example #9
 def test_missing_custom_error(self):
     validator = Validator(
         {'test': CustomMissingkeyError('MISSINGKEY!', 'hello')})
     error_bucket = validator.validate({})
     self.assertEqual(error_bucket.errors,
                      {'missing_key': {'': MissingKey('test', 'hello')}})
     self.assertEqual(error_bucket.custom_errors, ['MISSINGKEY!'])
Example #10
    def test_str(self):

        hello_validator = Validator('hello')
        self.assertTrue(hello_validator.validate('hello').isEmpty())
        error_bucket = hello_validator.validate('Not hello')
        self.assertEqual(error_bucket.errors,
                         {'not_equal': {'': NotEqual('hello', 'Not hello')}})
Example #11
 def test_surplus(self):
     simple_validator = Validator({'test': 'hello'})
     error_bucket = simple_validator.validate(
         {'test': 'hello',
          'wow so': 'doge'})
     self.assertEqual(error_bucket.errors,
                      {'surplus_key': {'': SurplusKey('wow so', 'doge')}})
Example #12
def main():
    """
    To run the UF model.
    """

    # k: number of folds for cross validation.
    k = 10

    input_filename = sys.argv[1]
    if_average = bool(int(sys.argv[2]))
    nums = []
    with open(input_filename, 'r') as f:
        for line in f:
            nums.append(line.strip().split(" "))

    for city in nums[0]:
        ratings_filename = "../data/reviews" + city
        network_filename = "../data/network" + city + "b.csv"
        # Creating an object for my model
        val = Validator(ratings_filename, network_filename, k, 0.)
        for llimit in map(int, nums[1]):
            for ulimit in map(int, nums[2]):
                uf = Using_Friends(val.get_network(),
                    n_ratings_lower_limit=llimit, n_ratings_upper_limit=ulimit,
                    if_average=if_average)
                (val_results, ratios) = val.validate(uf, run_all=True)
                print('validation results: ')
                print(city, llimit, ulimit, ratios, val_results,
                      np.mean(val_results))
Example #13
 def install_service(self, service_name, service_code):
     """
     Installs new service code in the execution environment.
     @type service_name: str
     @param service_name: The name of the service. This name must be of
     the form name1.name2.name3, e.g., daimi.imaging.scale
     @type service_code: str
     @param service_code: The code of the service. The code will be validated
     by the Locusts code validator and thus must adhere to a lot of different 
     rules.
     @raise Exception: Raised if the code fails to validate.  
     """
     # Check the validity of the service name.
     if not Jailor.valid_service_name(service_name):
         self.__logger.info('Service with invalid name given (%s)'%service_name)
         raise Exception('Invalid service name.')
     
     # Check that the service is not already installed.
     if self.registry.has_service(service_name):
         self.__logger.info('Attempt to re-install service.')
         raise Exception('Service %s already installed.'%service_name)
     
     # Avoid malicious attempts to push __init__.py this way...
     if service_name[-8:] == '__init__':
         self.__logger.info('Attempt to hack by pushing __init__.py')
         raise Exception('Stop trying to hack me!')
     
     # Validate the code.
     v = Validator(service_code)
     try:
         v.validate()
     except ValidationError as error:
         self.__logger.info('Validation error: %s' % error)
         raise Exception(str(error))
Example #14
 def test_str_list_one_int(self):
     str_validator = Validator([str])
     test_data = ['hello', 'oh no', 777, 'doge']
     error_bucket = str_validator.validate(test_data)
     error_expected = WrongType(int, str)
     self.assertEqual(error_bucket.errors,
                      {'wrong_type': {'2': error_expected}})
Example #15
def main():
    """
    To run the CF model.
    """
    input_filename = sys.argv[1]
    user_bias = bool(int(sys.argv[2]))
    item_bias = bool(int(sys.argv[3]))
    nums = []
    with open(input_filename, 'r') as f:
        for line in f:
            nums.append(line.strip().split(" "))

    for city in nums[0]:
        # Filenames needed.
        ratings_filename = "../data/reviews" + city
        network_filename = "../data/network" + city + "b.csv"
        # Create the Validator object.
        # k: number of folds for cross validation.
        k = 10
        # Creating an object for my model
        val = Validator(ratings_filename, network_filename, k, 0.)
        for nfeat in map(int, nums[1]):
            for lrate in map(float, nums[2]):
                for rparam in map(float, nums[3]):
                    my_rec = Matrix_Factorization(n_features=nfeat,
                                        learn_rate=lrate,
                                        regularization_param=rparam,
                                        optimizer_pct_improvement_criterion=2,
                                        user_bias_correction=user_bias,
                                        item_bias_correction=item_bias)
                    (val_results, ratios) = val.validate(my_rec, run_all=True)
                    print('validation results: ')
                    print(city, nfeat, lrate, rparam, ratios, val_results,
                          np.mean(val_results))
Example #16
    def get_url(self):

        while True:
            url = input("Please insert URL to API: ")
            validator = Validator()
            if validator.validate_url(url):
                self.api_url = url
                return 0
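validate_url is not shown in the snippet; a minimal standard-library sketch, assuming a URL counts as valid when it parses with an http(s) scheme and a host.

from urllib.parse import urlparse

def validate_url(url):
    # Accept only absolute http/https URLs with a network location.
    parts = urlparse(url)
    return parts.scheme in ('http', 'https') and bool(parts.netloc)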
Example #17
 def testGoodTitleParts(self):
     with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
         self.buffer.write('\t\t\t\t<h1>Foo v. Bar - 100 U.S. 200 (2013)</h1><div>Extra stuff</div>')
     v = Validator('buffer.txt')
     try:
         v.validateTitleParts()
     except Exception:
         self.fail('Validator did not pass a title with good parts.')
Example #18
 def test_net(self):
     with open('/home/clyde/lot18/net_test.csv') as csvfile:
         v = Validator(csvfile)
         v.validators = ['ny_net']
         v.prohibited_states = prohibited_states
         r = v()
     self.assertTrue([i['valid'] for i in r][0])
Example #19
 def test_email(self):
     with open('/home/clyde/lot18/email_test.csv') as csvfile:
         v = Validator(csvfile)
         v.validators = ['email']
         v.prohibited_states = prohibited_states
         r = v()
     self.assertEqual([False, True, False], [i['valid'] for i in r])
Example #20
    def usersites(self, email, session_id):
        Validator.email(email)
        Validator.session(session_id)

        user = self.user.get(email)
        self.user.validate_session(user['session'], session_id)

        return self.site.get_user_urls(user['id'])
Example #21
    def changepassword(self, email, oldpassword, newpassword):
        Validator.email(email)
        Validator.password(newpassword)

        if self.user.get(email)['password'] != Secret.hash(oldpassword, SALT):
            raise RiverException(_('The old password is incorrect for this user.'))

        self.user.update(email, password=Secret.hash(newpassword, SALT))
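Validator.email and Validator.password are not shown; a plausible sketch, assuming they raise RiverException on invalid input as the calling code implies. The regex and the minimum-length rule are illustrative, not from the original project.

import re

class Validator:
    @staticmethod
    def email(value):
        # Very loose shape check: one '@', no whitespace, a dotted domain.
        if not re.match(r'^[^@\s]+@[^@\s]+\.[^@\s]+$', value or ''):
            raise RiverException(_('The given email address is invalid.'))

    @staticmethod
    def password(value):
        # Hypothetical policy: non-empty and at least 8 characters.
        if not value or len(value) < 8:
            raise RiverException(_('The given password is too short.'))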
Example #22
    def test_always_true_callable(self):
        test_data = 'hello world'

        def return_true(data):
            return True

        always_true_validator = Validator(return_true)
        always_true_validator.validate(test_data)
Example #23
 def testGoodTitlePlacement(self):
     with open('buffer.txt', 'w', encoding='utf-8') as self.buffer:
         self.buffer.write('\n\n\t\t\t\t<h1>Person v. Person - 100 U.S. 25 (2000)</h1>')
     v = Validator('buffer.txt')
     try:
         v.validateTitlePlacement()
     except Exception:
         self.fail('Validator did not pass a good title.')
Example #24
 def test_str(self):
     str_validator = Validator(str)
     test_data = 'hello'
     str_validator.validate(test_data)
     test_error_data = 1234
     self.assertErrorBucket(
         str_validator, test_error_data,
         errors={'wrong_type': _EBN([WrongType(int, str)])})
Example #25
    def test_simple(self):

        simple_validator = Validator({'test': 'hello'})
        self.assertTrue(simple_validator.validate({'test': 'hello'}).isEmpty())

        simple_validator2 = Validator({'test': 'hello', 'wow so': 'doge'})
        self.assertTrue(simple_validator2.validate(
            {'test': 'hello',
             'wow so': 'doge'}).isEmpty())
Example #26
    def test_always_true_callable(self):
        test_data = 'hello world'

        def return_true(data):
            return True

        always_true_validator = Validator(return_true)
        self.assertTrue(
            always_true_validator.validate(test_data).isEmpty()
        )
Example #27
 def test_020_validate_input(self):
     v = Validator()
     good_line = ['localhost', '127.0.0.1', 'USER_NAME', 'USER_PASSWORD']
     bad_line = ['localhost', '127.0.0.1', 'USER_NAME1', 'USER_PASSWORD1']
     ret, msg = v.validateInput(good_line, ['localhost'])
     self.assertEqual(ret, True)
     ret, msg = v.validateInput(good_line, ['local'])
     self.assertEqual(ret, False)
     ret, msg = v.validateInput(bad_line, ['localhost'])
     self.assertEqual(ret, False)
Example #28
class ProxyPool:
    def __init__(self):
        self.sqlite = DatabaseObject(DB_CONFIG['SQLITE'])
        self.Validator = Validator()
        self.Crawler = Crawler()

    def _monitor(self):
        while True:
            self._update(PROXYPOOL_CONFIG['UPDATE_TIME'])
            self._delete(PROXYPOOL_CONFIG['DELETE_TIME'])
            self._crawl(PROXYPOOL_CONFIG['CRAWL_TIME'])
            time.sleep(1800)

    def _crawl(self, minutes):
        cutoff = (datetime.datetime.now() -
                  datetime.timedelta(minutes=minutes)).strftime('%Y-%m-%d %H:%M:%S')
        query = "SELECT COUNT(*) FROM proxy WHERE updatetime>'%s'" % cutoff
        count = self.sqlite.executesql(query)[0]
        if int(count[0]) < PROXYPOOL_CONFIG['MIN_IP_NUM']:
            logger.info('Crawl proxy begin')
            proxies = self.Crawler.run()
            logger.info('Crawl proxy end')
            logger.info('Validate proxy begin')
            available_proxies = self.Validator.run(proxies)
            logger.info('Validate proxy end')
            if DB_CONFIG['SQLITE']:
                self.save2sqlite(available_proxies)

    def _delete(self, minutes):
        cutoff = (datetime.datetime.now() -
                  datetime.timedelta(minutes=minutes)).strftime('%Y-%m-%d %H:%M:%S')
        query = "DELETE FROM proxy WHERE updatetime<'%s'" % cutoff
        self.sqlite.executesql(query)

    def _update(self, minutes):
        cutoff = (datetime.datetime.now() -
                  datetime.timedelta(minutes=minutes)).strftime('%Y-%m-%d %H:%M:%S')
        query = "SELECT ip,port FROM proxy WHERE updatetime<'%s'" % cutoff
        proxies = ['%s:%s' % n for n in self.sqlite.executesql(query)]
        if proxies:
            available_proxies = self.Validator.run(proxies)
            self.save2sqlite(available_proxies)

    def save2sqlite(self, result):
        failed = self.sqlite.insert('proxy', result)
        if failed:
            failed = self.sqlite.update('proxy', failed)
        if failed:
            logger.info('Some ip failed to save: %s' % (str(failed)))

    def _api(self):
        ProxyServer(API_CONFIG['PORT'])

    def run(self):
        t1 = threading.Thread(target=self._api)
        t2 = threading.Thread(target=self._monitor)
        t1.start()
        t2.start()
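The queries above splice timestamps into SQL by hand; if DatabaseObject sits on sqlite3, placeholder parameters are safer. A sketch against the sqlite3 module directly, bypassing the project's DatabaseObject wrapper as an assumption; only the table layout is taken from the snippet.

import datetime
import sqlite3

def delete_stale(conn: sqlite3.Connection, minutes: int) -> None:
    cutoff = (datetime.datetime.now() -
              datetime.timedelta(minutes=minutes)).strftime('%Y-%m-%d %H:%M:%S')
    # The driver handles quoting, so no string interpolation is needed.
    conn.execute("DELETE FROM proxy WHERE updatetime < ?", (cutoff,))
    conn.commit()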
Example #29
 def check_game_status(self, a, b):
     if Validator.check_chess_and_mate(copy.copy(self.last_move), copy.copy(self.board), a, b, self.ui_mode):
         if self.ui_mode:
             messagebox.showinfo("Information", "Chess and mate")
         self.status = "Chess and mate"
     elif Validator.check_chess_status(self.last_move, self.board, self.ui_mode):
         if self.ui_mode:
             messagebox.showinfo("Information", "Chess")
         self.status = "Chess"
     else:
         self.status = "Next turn"
Example #30
    def register(self, email, password):
        Validator.email(email)
        Validator.password(password)

        if self.user.exists(email):
            raise RiverException(_('The given email address has already been registered.'))
        
        user_id = Secret.generate(128)

        self.user.insert(email, enabled=True, id=user_id, password=Secret.hash(password, SALT))

        return user_id
Example #31
 def test_validate_time(self, MockInput):
     """Test that a user can enter a time duration in minutes"""
     user_input = ['120']
     MockInput.side_effect = user_input
     expected_input = Validator.validate_time()
     self.assertEqual(expected_input, int(user_input[0]))
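The MockInput argument implies this test is decorated with a patch over the built-in input; a sketch of that wiring, where the patch target is an assumption.

from unittest.mock import patch

@patch('builtins.input')
def test_validate_time(self, MockInput):
    # Feed the fake keyboard input, then check the parsed result.
    MockInput.side_effect = ['120']
    self.assertEqual(Validator.validate_time(), 120)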
Example #32
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "globus_subject": "https://srdata.nist.gov/insulation/home/index",
            "acl": ["public"],
            "mdf_source_name": "nist_heat_transmission",
            "mdf-publish.publication.collection": "NIST Heat Transmission Materials",
#            "mdf_data_class": ,

            "cite_as": ["Robert R. Zarr, Josue A. Chavez, Angela Y. Lee, Geraldine Dalton, and Shari L. Young, NIST Heat Transmission Properties of Insulating and Building Materials, NIST Standard Reference Database Number 81, National Institute of Standards and Technology, Gaithersburg MD, 20899, http://srdata.nist.gov/Insulation/."],
#            "license": ,

            "dc.title": "NIST Heat Transmission Properties of Insulating and Building Materials",
            "dc.creator": "NIST",
            "dc.identifier": "http://srdata.nist.gov/Insulation/",
            "dc.contributor.author": ["Robert R. Zarr", "Josue A. Chavez", "Angela Y. Lee", "Geraldine Dalton", "Shari L. Young"],
#            "dc.subject": ,
            "dc.description": "The NIST Database on Heat Conductivity of Building Materials provides a valuable reference for building designers, material manufacturers, and researchers in the thermal design of building components and equipment. NIST has accumulated a valuable and comprehensive collection of thermal conductivity data from measurements performed with a 200-mm square guarded-hot-plate apparatus (from 1933 to 1983). The guarded-hot-plate test method is arguably the most accurate and popular method for determination of thermal transmission properties of flat, homogeneous specimens under steady state conditions.",
#            "dc.relatedidentifier": ,
            "dc.year": 2015
            }
    elif type(metadata) is str:
        try:
            with open(metadata, 'r') as metadata_file:
                dataset_metadata = json.load(metadata_file)
        except Exception as e:
            sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")



    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # Pass strict=False instead to allow warnings rather than treating them as errors:
    # dataset_validator = Validator(dataset_metadata, strict=False)
    dataset_validator = Validator(dataset_metadata, strict=True)


    # Get the data
    # Each record also needs its own metadata
    with open(input_path) as in_file:
        dataset = json.load(in_file)
    for record in tqdm(dataset, desc="Processing data", disable=not verbose):
        link = "https://srdata.nist.gov/insulation/Search/detail/" + str(record["ID"])
        record_metadata = {
            "globus_subject": link,
            "acl": ["public"],
#            "mdf-publish.publication.collection": ,
#            "mdf_data_class": ,
#            "mdf-base.material_composition": ,

#            "cite_as": ,
#            "license": ,

            "dc.title": "Heat Transmission Properties - " + (record.get("Material") or record.get("tradename") or record.get("manufacturer", "") + str(record["ID"])),
#            "dc.creator": ,
            "dc.identifier": link,
#            "dc.contributor.author": ,
#            "dc.subject": , 
#            "dc.description": record.get("tradename", "") + " " + record.get("manufacturer", ""),
#            "dc.relatedidentifier": ,
#            "dc.year": ,

            "data": {
                "raw": json.dumps(record)
#                "files": 
                }
            }
        desc = ""
        if record.get("tradename"):
            desc += record.get("tradename")
        if record.get("manufacturer"):
            if desc:
                desc += " by "
            desc += record.get("manufacturer")
        if desc:
            record_metadata["dc.description"] = desc

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":", result.get("invalid_metadata", ""))
        # The Validator may return warnings if strict=False, which should be noted
        if result.get("warnings", None):
            print("Warnings:", result["warnings"])

    if verbose:
        print("Finished converting")
Example #33
 def test_invalid(self):
     fh = FileHandler(Validator())
     actual = fh.open('csvTest.cs2v')
     expected = False
     self.assertEqual(actual, expected)
Example #34
class Services:
    
    # Constructor
    def __init__(self, expenses=None):
        # Avoid a shared mutable default argument.
        if expenses is None:
            expenses = []
        self.expenses = expenses
        self.undo_actions = UndoActions(expenses)
        self.validator = Validator()

    def remove_from_list(self, condition):
        '''
        Removes all elements which fulfill the condition 

        Input: lambda - receives an expense object and must return a bool value
        Output: -
        '''
        expenses_copy = list(self.expenses)
        removed_expenses = []

        for e in expenses_copy:
            if condition(e):
                removed_expenses.append(e)
                self.expenses.remove(e)
        
        self.undo_actions.removed_list(removed_expenses)

    def build_list(self, expenses, condition = lambda e: True):
        '''
        Builds a string representing the list of expenses 

        Input: 
            list - expenses list to be converted into string
            lambda (optional) - receives an expense object and must return a bool value
        Output: string - the converted list
        '''
        list_expenses = ""
        prints = 0

        for e in expenses:
            if condition(e):
                prints += 1
                list_expenses += "\n" + str(prints) + ". " + str(e)

        if prints < 1:
            raise ValidationError("No expenses found")

        return list_expenses

    def add_expense(self, expense):
        '''
        Adds given expense to the list of expenses

        Input: Expense - the expense to be stored
        Output: -
        '''
        self.undo_actions.added(expense)
        self.expenses.append(expense)

    def preload_list(self):
        '''
        Preloads the list with 10 predefined expenses

        Input: -
        Output: -
        '''
        self.add_expense(Expense(1, 5, "clothing"))
        self.add_expense(Expense(30, 10, "food"))
        self.add_expense(Expense(2, 15, "housekeeping"))
        self.add_expense(Expense(29, 20, "internet"))
        self.add_expense(Expense(3, 25, "others"))
        self.add_expense(Expense(28, 30, "transport"))
        self.add_expense(Expense(4, 35, "others"))
        self.add_expense(Expense(27, 40, "internet"))
        self.add_expense(Expense(5, 45, "housekeeping"))
        self.add_expense(Expense(26, 50, "food"))

    def add(self, params):
        '''
        Adds an expense to the list

        Input: list - params from input
        Output: - (raises an error on invalid input)
        '''
        day_string = params[0]
        cost_string = params[1]
        category = params[2]

        self.validator.validate_add(day_string, cost_string, category)
        day = int(day_string)
        cost = int(cost_string)

        self.add_expense(Expense(day, cost, category))

    def list_all(self):
        '''
        Turns all expenses into a string

        Input: -
        Output: string - the list or error
        '''
        return self.build_list(self.expenses)        

    def filter_above(self, params):
        '''
        Filters the expenses and keeps only the ones with the cost above the given value

        Input: list - params from input
        Output: -
        '''
        value_string = params[0]

        self.validator.validate_filter_above(value_string)
        value = int(value_string)
            
        self.remove_from_list(lambda e: e.cost <= value)

    def undo_last_action(self):
        '''
        Undoes the last action that modified the list of expenses

        Input: list - params from input
        Output: -
        '''
        if self.undo_actions.nothing_to_undo():
            raise ValidationError("No action available to undo")
        self.undo_actions.undo()
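An illustrative usage of the Services API above; it assumes Expense, UndoActions, Validator and ValidationError from the surrounding project are importable.

services = Services()
services.preload_list()
services.add(["15", "120", "food"])   # day, cost, category as strings
services.filter_above(["30"])         # drop expenses costing 30 or less
print(services.list_all())
services.undo_last_action()           # restore the filtered-out expenses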
Example #35
class CandidateAI:
    """ Implement the Candidate and the Function used by the genetic Algorithm
    """
    _fitness = None

    def __init__(self, assignments):
        """ Create a Candidate

        :param assignments: List of lists. Possible assignments of lectures, instructors and rooms to times and days.
        EXAMPLE:
                <<instructor_idx,room_idx,time_idx,day_idx>
                 <instructor_idx,room_idx,time_idx,day_idx>
                                .....
                 <instructor_idx,room_idx,time_idx,day_idx>>
        """
        self.assignments = assignments

        # Initialize the Constraint Validator
        self._validator = Validator()

    def print(self):
        for lecture_idx, lecture in enumerate(self.assignments):
            lecture_ = "Lecture"+str(lecture_idx+1)
            instructor = "Instructor"+str(lecture[0])
            room = "Room"+str(lecture[1])
            time_dict = ['8-10','10-12','12-14']
            time = time_dict[lecture[2]-1]
            day_dict = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
            day = day_dict[lecture[3]-1]
            print(lecture_+":"+instructor+":"+room+":"+time+":"+day)

    def crossover(self, other_candidate, strategy="onePointSwap"):
        """Exchange of Properties of two Candidates

        :param other_candidate: Object of the other Candidate, given by the genetic Algorithm.
        :param strategy: Crossover Strategy that should be used.
        :return: empty
        """
        self._fitness = None
        if (strategy == "onePointSwap"):
            self._crossing_one_point_swap(other_candidate)
        elif strategy == "twoPointSwap":
            self._crossing_two_point_swap(other_candidate)

    def mutation(self, mutation_rate):
        """ Check which properties should be changed and change them

        :param mutation_rate: Probability (from 0.0 ... 1.0) of mutation for each Property
        :return: empty
        """
        # Reset Fitness score
        self._fitness = None

        # Walk every property of every lecture assignment
        for lecture_idx, lecture in enumerate(self.assignments):
            for element_idx, element in enumerate(self.assignments[lecture_idx]):
                # Check whether this property is selected for mutation
                if (random.random() < mutation_rate):
                    if element_idx == 2:  # time slots only range from 1 to 3
                        self.assignments[lecture_idx][element_idx] = randTo(3)
                    else:  # everything else ranges from 1 to 5
                        self.assignments[lecture_idx][element_idx] = randTo(5)

    def get_diversity(self, other_candidate):
        """ Compare the error between two Candidates

        :param other_candidate:
        :return: difference between Candidates in percent
        """
        div = 0
        all_elements = 0
        for lecture_idx in range(len(self.assignments)):
            for idx_inner in range(len(self.assignments[lecture_idx])):
                all_elements += 1
                if self.assignments[lecture_idx][idx_inner] != other_candidate.assignments[lecture_idx][idx_inner]:
                    div += 1

        return div / all_elements

    def get_fitness(self):
        """ Evaluate Candidate

        :return: Fitness from 0 to 1 and 1.5 if it is a valid Solution
        """
        if (self._fitness is None):
            constraint_errors = self._validator.check(self.assignments)
            if constraint_errors == 0:
                self._fitness = 1.5
            else:
                self._fitness = 1 / constraint_errors
        return self._fitness

    def _crossing_one_point_swap(self, other_candidate):
        """ One Point crossing

        :param other_candidate:
        :return:
        """
        cut_idx = randTo(4*19+3)  # 19 lectures times 4 fields, plus 3 at the end
        if 0.5 < random.random():
            swap_idx = range(cut_idx, 4*20-1)  # forward from idx
        else:
            swap_idx = range(0, cut_idx)  # backward from idx
        for idx in swap_idx:
            # Split long index in Lecture and field index
            lecture_idx = idx // 4
            field_idx = idx % 4
            tmp = other_candidate.assignments[lecture_idx][field_idx]
            other_candidate.assignments[lecture_idx][field_idx] = self.assignments[lecture_idx][field_idx]
            self.assignments[lecture_idx][field_idx] = tmp

    def _crossing_two_point_swap(self, other_candidate):
        """ Two Point Crossing

        :param other_candidate:
        :return:
        """
        cut_from = random.randint(0, 4*19+3)
        cut_to = random.randint(0, 4*19+3)

        if cut_from < cut_to:
            swap_idx = range(cut_from, cut_to)
        else:
            swap_idx = range(cut_to, cut_from)

        for idx in swap_idx:
            # Split long index in Lecture and field index
            lecture_idx = idx // 4
            field_idx = idx % 4
            tmp = other_candidate.assignments[lecture_idx][field_idx]
            other_candidate.assignments[lecture_idx][field_idx] = self.assignments[lecture_idx][field_idx]
            self.assignments[lecture_idx][field_idx] = tmp
Example #36
 async def handle_rpc(cls, request: Request) -> Response:
     """Method handle rpc method and params and call function for handling it"""
     body = await cls._get_body(request)
     Validator.validate('request_body', body)
     return await cls._handlers.get(body['method'])(body['params'],
                                                    request['db_session'])
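One caveat in handle_rpc above: _handlers.get() returns None for an unregistered method, so the call would fail with a bare TypeError. A hedged variant; RpcMethodNotFound is a hypothetical error type, not from the original code.

async def handle_rpc(cls, request):
    body = await cls._get_body(request)
    Validator.validate('request_body', body)
    handler = cls._handlers.get(body['method'])
    if handler is None:
        raise RpcMethodNotFound(body['method'])  # hypothetical error type
    return await handler(body['params'], request['db_session'])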
Example #37
 def test_age(self):
     v = Validator()
     self.assertTrue(v.check_age('01'))
     self.assertTrue(v.check_age('99'))
Example #38
 def validate(param_validating_rules):
     return Validator.validate(param_validating_rules)
Example #39
def convert(input_path, verbose=False):
    if verbose:
        print("Begin converting")
    # Collect the metadata
    dataset_metadata = {
        "globus_subject":
        "http://datadryad.org/resource/doi:10.5061/dryad.dd56c",
        "acl": ["public"],
        "mdf_source_name":
        "cip",
        "mdf-publish.publication.collection":
        "Classical Interatomic Potentials",
        "cite_as": [
            "Choudhary K, Congo FYP, Liang T, Becker C, Hennig RG, Tavazza F (2017) Evaluation and comparison of classical interatomic potentials through a user-friendly interactive web-interface. Scientific Data 4: 160125. http://dx.doi.org/10.1038/sdata.2016.125",
            "Choudhary K, Congo FYP, Liang T, Becker C, Hennig RG, Tavazza F (2017) Data from: Evaluation and comparison of classical interatomic potentials through a user-friendly interactive web-interface. Dryad Digital Repository. http://dx.doi.org/10.5061/dryad.dd56c"
        ],
        "dc.title":
        "Evaluation and comparison of classical interatomic potentials through a user-friendly interactive web-interface",
        "dc.creator":
        "National Institute of Standards and Technology",
        "dc.identifier":
        "http://dx.doi.org/10.5061/dryad.dd56c",
        "dc.contributor.author": [
            "Choudhary K", "Congo FYP", "Liang T", "Becker C", "Hennig RG",
            "Tavazza F"
        ],
        "dc.subject": [
            "interatomic potentials", "force-fields", "total energy", "energy",
            "elastic matrix", "structure", "elastic modulus", "JARVIS"
        ],
        #        "dc.description": ,
        "dc.relatedidentifier": ["http://dx.doi.org/10.1038/sdata.2016.125"],
        "dc.year":
        2017
    }

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    dataset_validator = Validator(dataset_metadata)

    # Get the data
    # Each record also needs its own metadata
    with open(input_path) as in_file:
        for record in tqdm(json.load(in_file),
                           desc="Converting data",
                           disable=not verbose):
            record_metadata = {
                "globus_subject":
                record["case-number"],
                "acl": ["public"],
                "mdf-publish.publication.collection":
                "Classical Interatomic Potentials",
                #                "mdf_data_class": ,
                "mdf-base.material_composition":
                record["composition"],
                "dc.title":
                "NIST Classical Interatomic Potential - " +
                record["forcefield"] + ", " + record["composition"],
                #                "dc.creator": ,
                "dc.identifier":
                record["case-number"],
                #                "dc.contributor.author": ,
                #                "dc.subject": ,
                #                "dc.description": ,
                #                "dc.relatedidentifier": ,
                #                "dc.year": ,
                "data": {
                    "raw": json.dumps(record),
                    "files": {}
                }
            }

            # Pass each individual record to the Validator
            result = dataset_validator.write_record(record_metadata)

            # Check if the Validator accepted the record, and print a message if it didn't
            # If the Validator returns "success" == True, the record was written successfully
            if result["success"] is not True:
                print("Error:", result["message"], ":",
                      result.get("invalid_metadata", ""))

    if verbose:
        print("Finished converting")
Example #40
        FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer()))
    #agent = Agent(domain_file, policies=[
    #        keras_2,
    #        MemoizationPolicy(max_history=MAX_HISTORY),
    #                                                CustomFallbackPolicy(
    #                    fallback_action_name=FALLBACK_ACTION_NAME,
    #                    nlu_threshold=NLU_THRESHOLD,
    #                    core_threshold=CORE_THRESHOLD)])

    agent = Agent(domain_file,
                  policies=[
                      keras_2,
                      MemoizationPolicy(max_history=MAX_HISTORY),
                      FallbackPolicy(nlu_threshold=NLU_THRESHOLD,
                                     core_threshold=CORE_THRESHOLD)
                  ])

    training_data = agent.load_data(training_folder, augmentation_factor=20)

    agent.train(training_data,
                epochs=TRAINING_EPOCHS,
                batch_size=BATCH_SIZE,
                validation_split=VALIDATION_SPLIT)
    agent.persist(model_path)


if __name__ == "__main__":
    validate = Validator('domain.yml', 'data/intents', 'data/stories/')
    validate.run_verifications()
    train_dialogue('domain.yml', 'models/dialogue', 'data/stories/')
Example #41
from validator import Validator

validate = Validator()

if not validate.is_comment(777):
    print("PASS")
else:
    print("FAIL")

if not validate.is_comment(7.77):
    print("PASS")
else:
    print("FAIL")

if not validate.is_comment(False):
    print("PASS")
else:
    print("FAIL")

if validate.is_comment("#comment"):
    print("PASS")
else:
    print("FAIL")

if not validate.is_comment(""):
    print("PASS")
else:
    print("FAIL")

if not validate.is_comment("not comment"):
    print("PASS")
else:
    print("FAIL")
Example #42
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    # TODO: Make sure the metadata is present in some form.
    # Fields can be:
    #    REQ (Required, must be present)
    #    RCM (Recommended, should be present if possible)
    #    OPT (Optional, can be present if useful)
    if not metadata:
        dataset_metadata = {
#            "globus_subject": ,                      # REQ string: Unique value (should be URI if possible)
#            "acl": ,                                 # REQ list of strings: UUID(s) of users/groups allowed to access data, or ["public"]
#            "mdf_source_name": ,                     # REQ string: Unique name for dataset
#            "mdf-publish.publication.collection": ,  # RCM string: Collection the dataset belongs to
#            "mdf_data_class": ,                      # RCM string: Type of data in all records in the dataset (do not provide for multi-type datasets)

#            "cite_as": ,                             # REQ list of strings: Complete citation(s) for this dataset.
#            "license": ,                             # RCM string: License to use the dataset (preferrably a link to the actual license).
#            "mdf_version": ,                         # REQ string: The metadata version in use (see VERSION above).

#            "dc.title": ,                            # REQ string: Title of dataset
#            "dc.creator": ,                          # REQ string: Owner of dataset
#            "dc.identifier": ,                       # REQ string: Link to dataset (dataset DOI if available)
#            "dc.contributor.author": ,               # RCM list of strings: Author(s) of dataset
#            "dc.subject": ,                          # RCM list of strings: Keywords about dataset
#            "dc.description": ,                      # RCM string: Description of dataset contents
#            "dc.relatedidentifier": ,                # RCM list of strings: Link(s) to related materials (such as an article)
#            "dc.year":                               # RCM integer: Year of dataset creation
            }
    elif type(metadata) is str:
        try:
            dataset_metadata = json.loads(metadata)
        except Exception:
            try:
                with open(metadata, 'r') as metadata_file:
                    dataset_metadata = json.load(metadata_file)
            except Exception as e:
                sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")



    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    dataset_validator = Validator(dataset_metadata, strict=False)
    # You can also force the Validator to treat warnings as errors with strict=True
    #dataset_validator = Validator(dataset_metadata, strict=True)


    # Get the data
    # TODO: Write the code to convert your dataset's records into JSON-serializable Python dictionaries
    #    Each record should be exactly one dictionary
    #    It is recommended that you convert your records one at a time, but it is possible to put them all into one big list (see below)
    #    It is also recommended that you use a parser to help with this process if one is available for your datatype

    # Each record also needs its own metadata
    for record in your_records:
        # TODO: Fill in these dictionary fields for each record
        # Fields can be:
        #    REQ (Required, must be present)
        #    RCM (Recommended, should be present if possible)
        #    OPT (Optional, can be present if useful)
        record_metadata = {
            "globus_subject": ,                      # REQ string: Unique value (should be URI to record if possible)
            "acl": ,                                 # REQ list of strings: UUID(s) of users/groups allowed to access data, or ["public"]
            "mdf-publish.publication.collection": ,  # OPT string: Collection the record belongs to (if different from dataset)
            "mdf_data_class": ,                      # OPT string: Type of data in record (if not set in dataset metadata)
            "mdf-base.material_composition": ,       # RCM string: Chemical composition of material in record

            "cite_as": ,                             # OPT list of strings: Complete citation(s) for this record (if different from dataset)
            "license": ,                             # OPT string: License to use the record (if different from dataset) (preferrably a link to the actual license).

            "dc.title": ,                            # REQ string: Title of record
            "dc.creator": ,                          # OPT string: Owner of record (if different from dataset)
            "dc.identifier": ,                       # RCM string: Link to record (record webpage, if available)
            "dc.contributor.author": ,               # OPT list of strings: Author(s) of record (if different from dataset)
            "dc.subject": ,                          # OPT list of strings: Keywords about record
            "dc.description": ,                      # OPT string: Description of record
            "dc.relatedidentifier": ,                # OPT list of strings: Link(s) to related materials (if different from dataset)
            "dc.year": ,                             # OPT integer: Year of record creation (if different from dataset)

            "data": {                                # RCM dictionary: Other record data (described below)
                "raw": ,                             # RCM string: Original data record text, if feasible
                "files": ,                           # RCM dictionary: {file_type : uri_to_file} pairs, data files (Example: {"cif" : "https://example.org/cifs/data_file.cif"})

                # other                              # RCM any JSON-valid type: Any other data fields you would like to include go in the "data" dictionary. Keys will be prepended with 'mdf_source_name:'
                }
            }

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":", result.get("invalid_metadata", ""))
        # The Validator may return warnings if strict=False, which should be noted
        if result.get("warnings", None):
            print("Warnings:", result["warnings"])

    # Alternatively, if the only way you can process your data is in one large list, you can pass the list to the Validator
    # You still must add the required metadata to your records
    # It is recommended to use the previous method if possible
    # result = dataset_validator.write_dataset(your_records_with_metadata)
    # if result["success"] is not True:
    #     print("Error:", result["message"])

    # TODO: Save your converter as [mdf_source_name]_converter.py
    # You're done!
    if verbose:
        print("Finished converting")
Example #43
 def test_sales(self):
     v = Validator()
     self.assertTrue(v.check_sales('001'))
     self.assertTrue(v.check_sales('999'))
     self.assertFalse(v.check_sales('99'))
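check_sales is not shown; a sketch consistent with the assertions above, where exactly three digit characters are required. Method name aside, the rule itself is an inference from the test.

import re

def check_sales(self, value):
    # '001' and '999' pass; '99' fails the three-digit requirement.
    return bool(re.fullmatch(r'\d{3}', value))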
Example #44
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
from validator import Validator

app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config.DevConfig')
upload_folder = app.config["UPLOAD_FOLDER"]
validator = Validator()


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/generate")
def generate():
    return render_template("generate.html")


@app.route("/upload", methods=["POST"])
def upload():
    uploaded_file = request.files["file"]
    UI_msg, file_detected = validator.validate_xml(uploaded_file.filename)
    if file_detected:
        return "File DETECTED"
    else:
        return render_template("generate.html", msg=UI_msg)


if __name__ == "__main__":
    # Entry-point body not shown in the original; app.run() is the conventional call.
    app.run()
Example #45
 def test_validate_date(self, MockInput):
     """Test that a user can enter a valid date"""
     user_input = ['30/12/2018']
     MockInput.side_effect = user_input
     expected_input = Validator.validate_date()
     self.assertEqual(expected_input, test_entry_1['date'])
Example #46
 def test_gender(self):
     v = Validator()
     # Rosemary
     self.assertTrue(v.check_gender('M'))
     self.assertTrue(v.check_gender('F'))
     self.assertTrue(v.check_gender('0'))
     self.assertTrue(v.check_gender(''))
     # Tim
     self.assertFalse(v.check_gender('m'))
     self.assertFalse(v.check_gender('f'))
     self.assertFalse(v.check_gender(True))
     self.assertFalse(v.check_gender(1))
     self.assertFalse(v.check_gender(None))
     self.assertFalse(v.check_gender({}))
     self.assertFalse(v.check_gender('MF'))
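check_gender is not shown; a minimal sketch consistent with the assertions above: only the strings 'M', 'F', '0' and '' are accepted, and non-string values are rejected.

def check_gender(self, value):
    # The isinstance guard rejects True, 1, None, and {} before the lookup.
    return isinstance(value, str) and value in ('M', 'F', '0', '')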
Example #47
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "globus_subject":
            "http://www.west-code.org/database/gwsoc81/index.php",  # REQ string: Unique value (should be URI if possible)
            "acl": [
                "public"
            ],  # REQ list of strings: UUID(s) of users/groups allowed to access data, or ["public"]
            "mdf_source_name":
            "gw_soc81",  # REQ string: Unique name for dataset
            "mdf-publish.publication.collection":
            "GW-SOC81",  # RCM string: Collection the dataset belongs to
            #            "mdf_data_class": ,                      # RCM string: Type of data in all records in the dataset (do not provide for multi-type datasets)
            "cite_as": [
                "P. Scherpelz, M. Govoni, I. Hamada, and G. Galli, Implementation and Validation of Fully-Relativistic GW Calculations: Spin-Orbit Coupling in Molecules, Nanocrystals and Solids, J. Chem. Theory Comput. 12, 3523 (2016).",
                "P.J. Linstrom and W.G. Mallard, Eds., NIST Chemistry WebBook, NIST Standard Reference Database Number 69, National Institute of Standards and Technology, Gaithersburg MD, 20899, http://webbook.nist.gov."
            ],  # REQ list of strings: Complete citation(s) for this dataset.
            #            "license": ,                             # RCM string: License to use the dataset (preferrably a link to the actual license).
            "dc.title":
            "Benchmark of G0W0 on 81 Molecules with Spin-Orbit Coupling",  # REQ string: Title of dataset
            "dc.creator":
            "The University of Chicago, Argonne National Laboratory",  # REQ string: Owner of dataset
            "dc.identifier":
            "http://www.west-code.org/database/gwsoc81/index.php",  # REQ string: Link to dataset (dataset DOI if available)
            #            "dc.contributor.author": ,               # RCM list of strings: Author(s) of dataset
            #            "dc.subject": ,                          # RCM list of strings: Keywords about dataset
            #           "dc.description": ,                      # RCM string: Description of dataset contents
            #            "dc.relatedidentifier": ,                # RCM list of strings: Link(s) to related materials (such as an article)
            #            "dc.year":                               # RCM integer: Year of dataset creation
        }
    elif type(metadata) is str:
        try:
            with open(metadata, 'r') as metadata_file:
                dataset_metadata = json.load(metadata_file)
        except Exception as e:
            sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # dataset_validator = Validator(dataset_metadata, strict=False)
    # You can also force the Validator to treat warnings as errors with strict=True
    dataset_validator = Validator(dataset_metadata, strict=True)

    # Get the data
    # Each record also needs its own metadata
    with open(input_path) as in_file:
        data = in_file.read()
    for record in tqdm(parse_tab(data),
                       desc="Processing records",
                       disable=not verbose):
        link = "http://www.west-code.org/database/gwsoc81/pag/" + record[
            "cas"] + ".php"
        record_metadata = {
            "globus_subject":
            link,  # REQ string: Unique value (should be URI to record if possible)
            "acl": [
                "public"
            ],  # REQ list of strings: UUID(s) of users/groups allowed to access data, or ["public"]
            #            "mdf-publish.publication.collection": ,  # OPT string: Collection the record belongs to (if different from dataset)
            #            "mdf_data_class": ,                      # OPT string: Type of data in record (if not set in dataset metadata)
            "mdf-base.material_composition": record[
                "formula"],  # RCM string: Chemical composition of material in record

            #            "cite_as": ,                             # OPT list of strings: Complete citation(s) for this record (if different from dataset)
            #            "license": ,                             # OPT string: License to use the record (if different from dataset) (preferrably a link to the actual license).
            "dc.title":
            "GW-SOC81 - " + record["name"],  # REQ string: Title of record
            #            "dc.creator": ,                          # OPT string: Owner of record (if different from dataset)
            "dc.identifier":
            link,  # RCM string: Link to record (record webpage, if available)
            #            "dc.contributor.author": ,               # OPT list of strings: Author(s) of record (if different from dataset)
            #            "dc.subject": ,                          # OPT list of strings: Keywords about record
            #            "dc.description": ,                      # OPT string: Description of record
            #            "dc.relatedidentifier": ,                # OPT list of strings: Link(s) to related materials (if different from dataset)
            #            "dc.year": ,                             # OPT integer: Year of record creation (if different from dataset)
            "data": {  # RCM dictionary: Other record data (described below)
                #                "raw": ,                             # RCM string: Original data record text, if feasible
                #                "files": ,                           # RCM dictionary: {file_type : uri_to_file} pairs, data files (Example: {"cif" : "https://example.org/cifs/data_file.cif"})
                "name": record["name"]
                #                "cas_number": record["cas"]
                # other                              # RCM any JSON-valid type: Any other data fields you would like to include go in the "data" dictionary. Keys will be prepended with 'mdf_source_name:'
            }
        }

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":",
                  result.get("invalid_metadata", ""))
        # The Validator may return warnings if strict=False, which should be noted
        if result.get("warnings", None):
            print("Warnings:", result["warnings"])

    if verbose:
        print("Finished converting")
Example #48
"""

import sys
from flask import request, jsonify, abort, Flask
from database.setup import Session, Base, engine
from validator import Validator
from processor import Processor
from scorer import Scorer
from authenticator import Authenticator
from measurer import Measurer

Base.metadata.create_all(engine)
app = Flask(__name__)
scorer = Scorer(app.logger)
authenticator = Authenticator(app.logger)
validator = Validator(app.logger, authenticator)
processor = Processor(app.logger, scorer, authenticator)
measurer = Measurer(app.logger)

# set up logging (for Linux)
if sys.platform.startswith("linux"):
    from logging import FileHandler, INFO
    handler = FileHandler("/tmp/mango_logs.txt")
    handler.setLevel(INFO)
    app.logger.addHandler(handler)


@app.route("/")
def hello():
    return "Hello, world!"
Example #49
 def __init__(self, expenses=None):
     # Avoid a shared mutable default argument.
     if expenses is None:
         expenses = []
     self.expenses = expenses
     self.undo_actions = UndoActions(expenses)
     self.validator = Validator()
Example #50
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "globus_subject":
            "http://qmml.org/datasets.html#gdb9-14",
            "acl": ["public"],
            "mdf_source_name":
            "gdb9-14",
            "mdf-publish.publication.collection":
            "gdb9-14",
            #            "mdf_data_class": ,
            "cite_as": [
                "Raghunathan Ramakrishnan, Pavlo Dral, Matthias Rupp, O. Anatole von Lilienfeld: Quantum Chemistry Structures and Properties of 134 kilo Molecules, Scientific Data 1: 140022, 2014."
            ],
            "license":
            "https://creativecommons.org/licenses/by-nc-sa/4.0/",
            "mdf_version":
            "0.1.0",
            "dc.title":
            "Quantum Chemistry Structures and Properties of 134 kilo Molecules",
            "dc.creator":
            "Ramakrishnan, R., Dral, P. O., Rupp, M. & von lilienfeld, O. A",
            "dc.identifier":
            "http://qmml.org/datasets.html#gdb9-14",
            "dc.contributor.author": [
                "Raghunathan Ramakrishnan", "Pavlo Dral", "Matthias Rupp",
                "O. Anatole von Lilienfeld"
            ],
            "dc.subject": [
                "Computational chemistry", "Density functional theory",
                "Quantum chemistry"
            ],
            "dc.description":
            "133,885 small organic molecules with up to 9 C, O, N, F atoms, saturated with H. Geometries, harmonic frequencies, dipole moments, polarizabilities, energies, enthalpies, and free energies of atomization at the DFT/B3LYP/6-31G(2df,p) level of theory. For a subset of 6,095 constitutional isomers of C7H10O2, energies, enthalpies, and free energies of atomization are provided at the G4MP2 level of theory.",
            "dc.relatedidentifier":
            ["http://dx.doi.org/10.1038/sdata.2014.22"],
            "dc.year":
            2014
        }
    elif type(metadata) is str:
        try:
            dataset_metadata = json.loads(metadata)
        except Exception:
            try:
                with open(metadata, 'r') as metadata_file:
                    dataset_metadata = json.load(metadata_file)
            except Exception as e:
                sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # dataset_validator = Validator(dataset_metadata, strict=False)
    # You can also force the Validator to treat warnings as errors with strict=True
    dataset_validator = Validator(dataset_metadata, strict=True)

    # Get the data
    #    Each record should be exactly one dictionary
    #    It is recommended that you convert your records one at a time, but it is possible to put them all into one big list (see below)
    #    It is also recommended that you use a parser to help with this process if one is available for your datatype

    # Each record also needs its own metadata
    for data_file in tqdm(find_files(input_path, "xyz"),
                          desc="Processing files",
                          disable=not verbose):
        record = parse_pymatgen(
            os.path.join(data_file["path"], data_file["filename"]))
        if record["structure"]:
            comp = record["structure"]["material_composition"]
        elif record["molecule"]:
            comp = record["molecule"]["material_composition"]
        uri = "https://data.materialsdatafacility.org/collections/" + "gdb9-14/" + data_file[
            "no_root_path"] + "/" + data_file["filename"]
        index = ""
        if data_file["no_root_path"] == "dsgdb9nsd.xyz":
            start = data_file["filename"].find('_')
            index = int(data_file["filename"][start + 1:-4])
        record_metadata = {
            "globus_subject": uri,
            "acl": ["public"],
            #            "mdf-publish.publication.collection": ,
            #            "mdf_data_class": ,
            "mdf-base.material_composition": comp,

            #            "cite_as": ,
            #            "license": ,
            "dc.title": "gdb9-14 - " + data_file["filename"],
            #            "dc.creator": ,
            "dc.identifier": uri,
            #            "dc.contributor.author": ,
            #            "dc.subject": ,
            #            "dc.description": ,
            #            "dc.relatedidentifier": ,
            #            "dc.year": ,
            "data": {
                #                "raw": ,
                #                "files": ,
                "index": index
            }
        }

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":",
                  result.get("invalid_metadata", ""))
        # The Validator may return warnings if strict=False, which should be noted
        if result.get("warnings", None):
            print("Warnings:", result["warnings"])

    # Alternatively, if the only way you can process your data is in one large list, you can pass the list to the Validator
    # You still must add the required metadata to your records
    # It is recommended to use the previous method if possible
    # result = dataset_validator.write_dataset(your_records_with_metadata)
    # if result["success"] is not True:
    #     print("Error:", result["message"])

    # You're done!
    if verbose:
        print("Finished converting")
Example no. 51
0
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "globus_subject":
            "http://www.cxidb.org/",
            "acl": ["public"],
            "mdf_source_name":
            "cxidb",
            "mdf-publish.publication.collection":
            "CXIDB",
            #            "mdf_data_class": ,
            "cite_as": [
                "Maia, F. R. N. C. The Coherent X-ray Imaging Data Bank. Nat. Methods 9, 854–855 (2012)."
            ],
            #            "license": ,
            "dc.title":
            "The Coherent X-ray Imaging Data Bank",
            "dc.creator":
            "CXIDB",
            "dc.identifier":
            "http://www.cxidb.org/",
            "dc.contributor.author": ["Maia, F. R. N. C."],
            #            "dc.subject": ,
            "dc.description":
            "A new database which offers scientists from all over the world a unique opportunity to access data from Coherent X-ray Imaging (CXI) experiments.",
            #            "dc.relatedidentifier": ,
            "dc.year":
            2012
        }
    elif type(metadata) is str:
        try:
            with open(metadata, 'r') as metadata_file:
                dataset_metadata = json.load(metadata_file)
        except Exception as e:
            sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # dataset_validator = Validator(dataset_metadata)
    # You can also force the Validator to treat warnings as errors with strict=True
    dataset_validator = Validator(dataset_metadata, strict=True)

    # Get the data
    # Each record also needs its own metadata
    for dir_data in tqdm(find_files(input_path,
                                    file_pattern="json",
                                    verbose=verbose),
                         desc="Processing metadata",
                         disable=not verbose):
        with open(os.path.join(dir_data["path"],
                               dir_data["filename"])) as file_data:
            cxidb_data = json.load(file_data)
        record_metadata = {
            "globus_subject":
            cxidb_data["url"],
            "acl": ["public"],
            #            "mdf-publish.publication.collection": ,
            #            "mdf_data_class": ,
            #            "mdf-base.material_composition": ,

            #            "cite_as": ,
            #            "license": ,
            "dc.title":
            cxidb_data["citation_title"],
            #            "dc.creator": ,
            "dc.identifier":
            cxidb_data["url"],
            "dc.contributor.author": [cxidb_data["citation_authors"]]
            if type(cxidb_data["citation_authors"]) is str else
            cxidb_data["citation_authors"],
            #            "dc.subject": ,
            #            "dc.description": ,
            "dc.relatedidentifier": [
                cxidb_data.get("citation_DOI", None),
                cxidb_data.get("entry_DOI", None)
            ],
            "dc.year":
            int(cxidb_data["summary_deposition_date"][:4]),
            "data": {
                "raw": json.dumps(cxidb_data)
                #                "files": ,
            }
        }

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":",
                  result.get("invalid_metadata", ""))
        # The Validator may return warnings if strict=False, which should be noted
        if result.get("warnings", None):
            print("Warnings:", result["warnings"])

    if verbose:
        print("Finished converting")
Example no. 52
0
 def test_birthday(self):
     v = Validator()
     self.assertTrue(v.check_birthday('1-1-1996'))
     self.assertTrue(v.check_birthday('31-12-1971'))
     self.assertTrue(v.check_birthday('31-12-1171'))
     self.assertTrue(v.check_birthday('31-12-3171'))
     self.assertFalse(v.check_birthday(56186729))
     self.assertFalse(v.check_birthday('1/1/1996'))
     self.assertFalse(v.check_birthday("Jan-31-1971"))
     self.assertFalse(v.check_birthday(True))
     self.assertFalse(v.check_birthday(""))
     self.assertFalse(v.check_birthday("--"))
Example no. 53
0
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "globus_subject":
            "http://solarfuelshub.org/benchmarking-database",
            "acl": ["public"],
            "mdf_source_name":
            "jcap_benchmarking_db",
            "mdf-publish.publication.collection":
            "JCAP Benchmarking DB",
            #            "mdf_data_class": ,
            "cite_as": [
                "McCrory, C. C. L., Jung, S. H., Peters, J. C. & Jaramillo, T. F. Benchmarking Heterogeneous Electrocatalysts for the Oxygen Evolution Reaction. Journal of the American Chemical Society 135, 16977-16987, DOI: 10.1021/ja407115p (2013)",
                "McCrory, C. C. L. et al. Benchmarking HER and OER Electrocatalysts for Solar Water Splitting Devices. Journal of the American Chemical Society, 137, 4347–4357, DOI: 10.1021/ja510442p (2015)"
            ],
            #            "license": ,
            "dc.title":
            "JCAP Benchmarking Database",
            "dc.creator":
            "JCAP",
            "dc.identifier":
            "http://solarfuelshub.org/benchmarking-database",
            #            "dc.contributor.author": ,
            #            "dc.subject": ,
            "dc.description":
            "The JCAP Benchmarking scientists developed and implemented uniform methods and protocols for characterizing the activities of electrocatalysts under standard operating conditions for water-splitting devices. They have determined standard measurement protocols that reproducibly quantify catalytic activity and stability. Data for several catalysts studied are made available in this database.",
            #            "dc.relatedidentifier": ,
            #            "dc.year":
        }
    elif type(metadata) is str:
        try:
            with open(metadata, 'r') as metadata_file:
                dataset_metadata = json.load(metadata_file)
        except Exception as e:
            sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # dataset_validator = Validator(dataset_metadata, strict=False)
    # You can also force the Validator to treat warnings as errors with strict=True
    dataset_validator = Validator(dataset_metadata, strict=True)

    # Get the data
    # Each record also needs its own metadata
    for data_file in tqdm(find_files(input_path, ".txt"),
                          desc="Processing files",
                          disable=not verbose):
        with open(os.path.join(data_file["path"],
                               data_file["filename"])) as in_file:
            record = {}
            key = ""
            for line in in_file:
                clean_line = line.strip()
                if clean_line.endswith(":"):
                    key = clean_line.strip(": ").lower().replace(" ", "_")
                else:
                    record[key] = clean_line
        link = "https://internal.solarfuelshub.org/jcapresources/benchmarking/catalysts_for_iframe/view/jcapbench_catalyst/" + data_file[
            "filename"][:-4]
        record_metadata = {
            "globus_subject": link,
            "acl": ["public"],
            #            "mdf-publish.publication.collection": ,
            #            "mdf_data_class": ,
            "mdf-base.material_composition": record["catalyst"],
            "cite_as": [record["publication"]],
            #            "license": ,
            "dc.title": "JCAP Benchmark - " + record["catalyst"],
            #            "dc.creator": ,
            "dc.identifier": link,
            #            "dc.contributor.author": ,
            #            "dc.subject": ,
            #            "dc.description": ,
            #            "dc.relatedidentifier": ,
            "dc.year": int(record["release_date"][:4]),
            "data": {
                #                "raw": ,
                #                "files":
            }
        }
        record_metadata["data"].update(record)

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":",
                  result.get("invalid_metadata", ""))
        # The Validator may return warnings if strict=False, which should be noted
        if result.get("warnings", None):
            print("Warnings:", result["warnings"])

    if verbose:
        print("Finished converting")
Example no. 54
0
from generate_transactions import gen_alice_and_bob_tx

from config import *

# Setup
GENESIS_BLOCKS = {}
GENESIS_MESSAGES = []
for ID in SHARD_IDS:
    GENESIS_BLOCKS[ID] = Block(ID)
    GENESIS_MESSAGES.append(ConsensusMessage(
        GENESIS_BLOCKS[ID], 0,
        []))  # The watcher is the sender of the genesis blocks

validators = {}
for name in VALIDATOR_NAMES:
    validators[name] = Validator(name)

# The watcher lives at validator name 0 and receives all the messages
watcher = validators[0]

for v in VALIDATOR_NAMES:
    for genesis_message in GENESIS_MESSAGES:
        validators[v].receive_consensus_message(genesis_message)

# GLOBAL MEMPOOLS
mempools = {}
txs = gen_alice_and_bob_tx()
for ID in SHARD_IDS:
    mempools[ID] = txs

# GLOBAL VIEWABLES
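
The genesis-delivery loop above sends every message to every validator; a minimal broadcast helper in the same spirit, assuming only the receive_consensus_message interface already used above:

def broadcast(validators, message):
    # Deliver one consensus message to every validator in the network
    for v in validators.values():
        v.receive_consensus_message(message)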
Example no. 55
0
"""
Downloading module.
"""
import tweepy
import json
from time import sleep

from validator import Validator
from storage.files import FileHandler


# load config file
with open('config.json') as f:
    config_json = json.load(f)

# Validator of configuration file
validator = Validator()
validator.validate_downloader_config(config_json)


api_cred = config_json['twitter_api']

# create authentication objects
auth = tweepy.OAuthHandler(api_cred['cost_key'], api_cred['cost_skey'])
auth.set_access_token(api_cred['acc_key'], api_cred['acc_skey'])

# create API
api = tweepy.API(
    auth,
    wait_on_rate_limit=True,
    wait_on_rate_limit_notify=True,
    retry_count=10,
Example no. 56
0
 def test_validate_title(self, MockInput):
     """Test that a user can enter a valid task title"""
     user_input = [test_entry_1['title']]
     MockInput.side_effect = user_input
     expected_input = Validator.validate_title()
     self.assertEqual(expected_input, test_entry_1['title'])
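
For the MockInput parameter to be injected, the test method needs a patch decorator that this listing omits; a sketch of the usual arrangement (patching 'builtins.input' is an assumption about where Validator.validate_title reads its input, and the sample title is hypothetical):

import unittest
from unittest import mock

class TestValidatorTitle(unittest.TestCase):
    @mock.patch('builtins.input')  # injected below as MockInput
    def test_validate_title(self, MockInput):
        MockInput.side_effect = ['My task title']
        self.assertEqual(Validator.validate_title(), 'My task title')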
Example no. 57
0
 def test_attributes(self):
     v = Validator()
     self.assertTrue(v.check_in_attributes('EMPID'))
     self.assertTrue(v.check_in_attributes('GENDER'))
     self.assertTrue(v.check_in_attributes('AGE'))
     self.assertTrue(v.check_in_attributes('SALES'))
     self.assertTrue(v.check_in_attributes('BMI'))
     self.assertTrue(v.check_in_attributes('SALARY'))
     self.assertTrue(v.check_in_attributes('9999'))
     self.assertTrue(v.check_in_attributes("BIRTHDAY"))
     self.assertTrue(v.check_in_attributes("birthday"))
     self.assertFalse(v.check_in_attributes(True))
     self.assertFalse(v.check_in_attributes(['EMPID', 'GENDER']))
     self.assertFalse(v.check_in_attributes(None))
     self.assertFalse(v.check_in_attributes(1))
     self.assertTrue(v.check_in_attributes(23333))
Example no. 58
0
 def test_bmi(self):
     v = Validator()
     self.assertTrue(v.check_bmi('Normal'))
     self.assertTrue(v.check_bmi('Overweight'))
     self.assertTrue(v.check_bmi('Obesity'))
     self.assertTrue(v.check_bmi('Underweight'))
     self.assertFalse(v.check_bmi('rUnderweight'))
     self.assertFalse(v.check_bmi('Underweight2'))
     self.assertFalse(v.check_bmi('UNDERWEIGHT'))
     self.assertFalse(v.check_bmi(""))
     self.assertTrue(v.check_bmi("lol"))
     self.assertFalse(v.check_bmi(1))
     self.assertFalse(v.check_bmi(True))
Example no. 59
0
def convert(input_path, metadata=None, verbose=False):
    if verbose:
        print("Begin converting")

    # Collect the metadata
    if not metadata:
        dataset_metadata = {
            "globus_subject":
            "https://data.materialsdatafacility.org/published/#trinkle_mg_x_diffusion",
            "acl": ["public"],
            "mdf_source_name":
            "trinkle_mg_x_diffusion",
            "mdf-publish.publication.collection":
            "Mg-X Diffusion Dataset",
            "mdf_data_class":
            "vasp",
            "cite_as": [
                "Citation for dataset Mg-X-Diffusion with author(s): Dallas Trinkle, Ravi Agarwal"
            ],
            #            "license": "",
            "dc.title":
            "Mg-X-Diffusion",
            "dc.creator":
            "University of Illinois at Urbana-Champaign",
            "dc.identifier":
            "https://data.materialsdatafacility.org/published/#trinkle_mg_x_diffusion",
            "dc.contributor.author": ["Trinkle, Dallas", "Agarwal, Ravi"],
            #"dc.subject": [],
            #"dc.description": "",
            #            "dc.relatedidentifier": [],
            "dc.year":
            2017
        }
    elif type(metadata) is str:
        try:
            with open(metadata, 'r') as metadata_file:
                dataset_metadata = json.load(metadata_file)
        except Exception as e:
            sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    dataset_validator = Validator(dataset_metadata)

    # Get the data
    # Each record also needs its own metadata
    for data_file in tqdm(find_files(input_path, "OUTCAR"),
                          desc="Processing files",
                          disable=not verbose):
        record = parse_ase(
            os.path.join(data_file["path"], data_file["filename"]), "vasp")
        uri = "https://data.materialsdatafacility.org/collections/" + "mg-x/" + data_file[
            "no_root_path"] + "/" + data_file["filename"]
        record_metadata = {
            "globus_subject": uri,
            "acl": ["public"],
            #            "mdf-publish.publication.collection": ,
            #            "mdf-base.material_composition": record["frames"][0]["chemical_formula"],

            #            "cite_as": ,
            #            "license": ,
            "dc.title": "Mg-X Diffusions - ",
            #            "dc.creator": ,
            "dc.identifier": uri,
            #            "dc.contributor.author": ,
            #            "dc.subject": ,
            #            "dc.description": ,
            #            "dc.relatedidentifier": ,
            #            "dc.year": ,
            "data": {
                #                "raw": ,
                "files": {
                    "outcar": uri
                }
            }
        }
        try:
            composition = record["frames"][0]["chemical_formula"]
            record_metadata["mdf-base.material_composition"] = composition
            record_metadata["dc.title"] += composition
        except (KeyError, IndexError, TypeError):
            # parse_ase was unable to read the composition of record 1386:
            # https://data.materialsdatafacility.org/collections/mg-x/Elements/Eu/Mg-X_Eu/OUTCAR
            # Fall back to the known composition for that record
            record_metadata["mdf-base.material_composition"] = "EuMg149"
            record_metadata["dc.title"] += "EuMg149"

        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)

        # Check if the Validator accepted the record, and print a message if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if result["success"] is not True:
            print("Error:", result["message"], ":",
                  result.get("invalid_metadata", ""))

    if verbose:
        print("Finished converting")
Example no. 60
0
 def test_id(self):
     v = Validator()
     self.assertFalse(v.check_id('UY7'))
     self.assertFalse(v.check_id('000'))
     self.assertFalse(v.check_id('AAA'))
     self.assertFalse(v.check_id('999'))