def test_query_key_by(db, model_class, data, iteratee, expected):
    """Test SQLQuery.key_by."""
    records = [model_class(item) for item in data]
    db.save(records)

    keyed = db.query(model_class).key_by(iteratee)

    for key, record in keyed.items():
        wanted = expected[key]
        actual = pyd.pick(dict(record), pyd.keys(wanted))
        assert actual == wanted
def run_tests(module):
    """Run every ``test_``-prefixed member of `module` by calling its ``main()``.

    Args:
        module: Object whose keys are enumerated via ``_.keys``; each
            attribute whose name starts with ``'test_'`` is invoked.
    """
    for key in _.keys(module):
        # str.startswith replaces the original `key.index('test_') == 0`
        # idiom, which relied on catching ValueError for control flow and
        # therefore also silently swallowed any ValueError raised by the
        # test's own main() call.
        if key.startswith('test_'):
            getattr(module, key).main()
def to_dict(obj):
    """Convert `obj` to ``dict`` by creating a new ``dict`` using `obj` keys
    and values.

    Args:
        obj: (mixed): Object to convert.

    Returns:
        dict: Object converted to ``dict``.

    Example:

        >>> obj = {'a': 1, 'b': 2}
        >>> obj2 = to_dict(obj)
        >>> obj2 == obj
        True
        >>> obj2 is not obj
        True

    .. versionadded:: 3.0.0

    .. versionchanged:: 4.0.0
        Removed alias ``to_plain_object``.
    """
    # Pair each key with its value; pyd.keys/pyd.values yield matching orders.
    return {key: value for key, value in zip(pyd.keys(obj), pyd.values(obj))}
def test_query_index_by(db, model_class, data, callback, expected):
    """Test Query.index_by."""
    db.save([model_class(entry) for entry in data])

    indexed = db.query(model_class).index_by(callback)

    for key, model in indexed.items():
        expected_fields = expected[key]
        picked = pyd.pick(dict(model), pyd.keys(expected_fields))
        assert picked == expected_fields
def dict_projection(data, projection):
    """Project `data` through a MongoDB-style `projection` mapping.

    Truthy projection values select keys to include (matching the key itself
    or any key prefixed with ``'<key>.'``); falsy values remove the key from
    `data` in place.

    Args:
        data (dict): Source mapping.
        projection (dict): Mapping of key -> truthy (include) / falsy (exclude).

    Returns:
        dict: A new dict of the included keys when any projection value is
        truthy; otherwise `data` itself (mutated by the exclusions).

    Raises:
        Exception: If `data` or `projection` is not a dict.
    """
    if not projection:
        return data
    if not isinstance(data, dict):
        raise Exception('data is not a dict')
    if not isinstance(projection, dict):
        raise Exception('projection is not a dict')

    include_mode = False  # becomes True once any projection value is truthy
    include = {}
    keys = _.keys(data)
    for k, v in projection.items():
        if v:
            include_mode = True
        # _.find_key returns the matching list index, or None when nothing
        # matches -- the original compared `None > -1`, which raises
        # TypeError on Python 3.
        i = _.find_key(keys, lambda x: x == k or _.starts_with(x, k + '.'))
        if i is not None and i > -1:
            if v:
                include[keys[i]] = _.get(data, keys[i]) or data[keys[i]]
            else:
                # NOTE(review): this pops `k` even when the match was a
                # prefixed key (`k + '.x'`) -- preserved from the original;
                # confirm whether keys[i] was intended.
                data.pop(k)
    return include if include_mode else data
def populate(self, **kwargs):
    """Extended ndb.Model populate method, so it can ignore properties,
    which are not defined in model class without throwing error
    """
    # Strip public/meta properties that must never be mass-assigned, then
    # keep only keys that correspond to real model properties.
    ignored = Base.PUBLIC_PROPERTIES + ['key', 'id']
    allowed = _.keys(self._properties)
    cleaned = _.pick(_.omit(kwargs, ignored), allowed)
    super(Base, self).populate(**cleaned)
def test_query_stack_by(db, model_class, data, iteratee, expected):
    """Test SQLQuery.stack_by."""
    db.save([model_class(record) for record in data])

    grouped = db.query(model_class).stack_by(iteratee)

    for key, group in grouped.items():
        wanted = expected[key]
        picked = [pyd.pick(dict(member), pyd.keys(wanted[0])) for member in group]
        assert picked == wanted
def sync(self, notes):
    """Replace all rows in the ``Note`` table with the entries of `notes`.

    Args:
        notes (dict): Mapping of arbitrary keys to note dicts containing at
            least ``'Text'``, ``'WindowPosition'`` and ``'Theme'``.

    Returns:
        dict: ``{"res": True}`` on success (including empty input),
        ``{"res": False}`` on any error; ``None`` when `notes` is not a dict
        (preserved from the original interface).
    """
    try:
        if not notes:
            print("is Empty")
            return {"res": True}
        # NOTE: the original had a second, unreachable `if not notes` branch
        # here; it has been removed as dead code.
        if not isinstance(notes, dict):
            return print("args must be a dict")

        # Full resync: wipe the table before re-inserting everything.
        self.db.execute('DELETE FROM Note')
        self.conn.commit()

        self.temp = notes  # kept for compatibility with any external readers
        print("update")

        columns = ["Text", "WindowPosition", "Id", "ParentId", "Theme",
                   "CreatedAt", "UpdatedAt"]
        # Parameterized query: the previous string-formatted SQL was open to
        # SQL injection (and crashed on any apostrophe in the note text).
        # Timestamps are now stored as integers instead of quoted strings.
        sql = "INSERT INTO Note ({0}) VALUES ({1})".format(
            ','.join(columns), ','.join('?' * len(columns)))

        for key in notes:
            now = int(time.time())
            params = [
                notes[key]['Text'],
                notes[key]['WindowPosition'],
                str(uuid.uuid1()),
                self.id,
                notes[key]['Theme'],
                now,
                now,
            ]
            if self.debug:
                for col, value in zip(columns, params):
                    print(col, " = ", value)
            print("sql", sql)
            self.db.execute(sql, params)
            self.conn.commit()

        print("sync")
        self.temp = None
        return {"res": True}
    except Exception as e:
        # The original format string used {0} twice, so the exception was
        # never actually printed.
        print("{0} sync, check this {1}".format(__file__, e))
        return {"res": False}
def stats(name, dataset):
    """Print size and example information for each populated stage of `dataset`."""

    def report(stage, collection, example, example_len=None):
        # Shared layout: blank line, count, optional example length, example.
        print('\n')
        print(name + ' ' + stage + ': ' + str(len(collection)))
        if example_len is not None:
            print('example length: ' + str(example_len))
        print('******* EXAMPLE ********')
        print(example)

    if len(dataset.raw_data) > 0:
        report('raw_data', dataset.raw_data, dataset.raw_data[0],
               len(dataset.raw_data[0].split(' ')))
    if len(dataset.sanitized) > 0:
        report('sanitized', dataset.sanitized, dataset.sanitized[0],
               len(dataset.sanitized[0].split(' ')))
    if len(dataset.tokenized) > 0:
        report('tokenized', dataset.tokenized, dataset.tokenized[0],
               len(dataset.tokenized[0]))
    if len(dataset.padded) > 0:
        report('padded', dataset.padded, dataset.padded[0],
               len(dataset.padded[0]))
    # _.keys(...)[0] is kept as-is: pydash returns indices for sequences and
    # keys for mappings, so it is not interchangeable with next(iter(...)).
    if len(dataset.x) > 0:
        first = dataset.x[_.keys(dataset.x)[0]]
        report('x', dataset.x, first, len(first))
    if len(dataset.valid_x) > 0:
        first = dataset.valid_x[_.keys(dataset.valid_x)[0]]
        report('valid_x', dataset.valid_x, first, len(first))
    if len(dataset.y) > 0:
        report('y', dataset.y, dataset.y[_.keys(dataset.y)[0]])
    if len(dataset.valid_y) > 0:
        report('valid_y', dataset.valid_y,
               dataset.valid_y[_.keys(dataset.valid_y)[0]])
def to_dict(obj):
    """Convert `obj` to ``dict`` by creating a new ``dict`` using `obj` keys
    and values.

    Args:
        obj: (mixed): Object to convert.

    Returns:
        dict: Object converted to ``dict``.

    Example:

        >>> obj = {'a': 1, 'b': 2}
        >>> obj2 = to_dict(obj)
        >>> obj2 == obj
        True
        >>> obj2 is not obj
        True

    .. versionadded:: 3.0.0
    """
    pairs = zip(pyd.keys(obj), pyd.values(obj))
    return dict(pairs)
def save_json(name, data):
    """Serialize `data` to ``<name>.json`` as pretty-printed UTF-8 JSON.

    Args:
        name (str): Output path without the ``.json`` extension.
        data: Any JSON-serializable object.
    """
    filename = name + '.json'
    with open(filename, 'w', encoding='utf8') as f:
        # ensure_ascii=False keeps accented characters readable in the file.
        json.dump(data, f, indent=4, sort_keys=False, ensure_ascii=False)
    # The redundant f.close() was removed; `with` already closes the file.


file_train = "assin-ptbr-train"
file_test = "assin-ptbr-dev"
RSEED = 50

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

# Load the preprocessed training inputs; the JSON is keyed so that a
# transpose yields one row per example, then drop the raw-text columns.
data_inputs = pd.read_json(file_train + '-processed.json')
ind = keys(data_inputs)
data_inputs = data_inputs.transpose()
data_inputs = data_inputs.drop('f1', axis=1)
data_inputs = data_inputs.drop('f2', axis=1)
data_labels = np.array(load_json(file_train + '-labels.json'))

# percentile list
perc = [.20, .40, .60, .80]
# list of dtypes to include
include = ['object', 'float', 'int']
# print(data_inputs.describe(percentiles=perc, include=include))
# print(data_inputs.mean(axis=0))
# exit()
def test_keys(case, expected):
    actual = _.keys(case)
    # Compare as sets: key order is not part of the contract.
    assert set(actual) == set(expected)
def get_all_properties(cls):
    """Gets all model's ndb properties"""
    # 'key' and 'id' are synthetic identifiers, not stored model properties.
    synthetic = ['key', 'id']
    return synthetic + _.keys(cls._properties)