def get_models_startwith_label(self, prefix):
    """Return every model whose label starts with ``prefix``."""
    return [
        model
        for label, model in self._iter_model_label()
        if label.startswith(prefix)
    ]
def test_soft_unique_together_being_included(
        common_model_name_not_unique_class_mock, common_model_class_mock,
        settings_mock):
    """A model with a soft unique_together exposes those fields in the graph."""
    models = []
    model_1 = common_model_class_mock('model_1')
    models.append(model_1)
    # model_2's name alone is not unique; ('name', 'fk_a') is soft-unique.
    model_2 = common_model_name_not_unique_class_mock(
        (), model_1, model_1, 'model_2', soft_ut=[('name', 'fk_a')])
    models.append(model_2)
    # Graph generation must not depend on the order models are supplied in.
    random.shuffle(models)
    with mock.patch('awx.main.utils.named_url_graph.settings', settings_mock):
        generate_graph(models)
    assert settings_mock.NAMED_URL_GRAPH[model_1].model == model_1
    assert settings_mock.NAMED_URL_GRAPH[model_1].fields == ('name', )
    assert settings_mock.NAMED_URL_GRAPH[model_1].adj_list == []
    assert settings_mock.NAMED_URL_GRAPH[model_2].model == model_2
    assert settings_mock.NAMED_URL_GRAPH[model_2].fields == ('name', )
    # NOTE(review): indexing zip(...)[0] only works on Python 2 (zip returns
    # an iterator on Python 3) -- confirm the target interpreter.
    assert zip(
        *settings_mock.NAMED_URL_GRAPH[model_2].adj_list)[0] == ('fk_a', )
    assert [
        x.model
        for x in zip(*settings_mock.NAMED_URL_GRAPH[model_2].adj_list)[1]
    ] == [model_1]
def get_cql_models(app, connection=None, keyspace=None):
    """
    :param app: django models module
    :param connection: connection name
    :param keyspace: keyspace
    :return: list of all cassandra.cqlengine.Model within app that should be
        synced to keyspace.
    """
    from .models import DjangoCassandraModel

    # With exactly one configured cassandra connection, treat it as the
    # default regardless of its alias.
    only_one_connection = len(list(get_cassandra_connections())) == 1
    uses_default = connection == DEFAULT_DB_ALIAS or only_one_connection
    wanted_types = (cqlengine.models.Model, DjangoCassandraModel)

    found = []
    for _name, member in inspect.getmembers(app):
        is_concrete = (inspect.isclass(member)
                       and issubclass(member, wanted_types)
                       and not member.__abstract__)
        if not is_concrete:
            continue
        matches_connection = member.__connection__ == connection
        implicit_default = member.__connection__ is None and uses_default
        matches_keyspace = (member.__connection__ is None
                            and member.__keyspace__ is not None
                            and member.__keyspace__ == keyspace)
        if matches_connection or implicit_default or matches_keyspace:
            found.append(member)
    return found
def index(self, request, extra_context=None):
    """Render the app index page listing the models the user may access."""
    models = []
    if not request.user.has_module_perms(self.app_name):
        raise http.Http404('The requested admin page does not exist.')
    for model, model_admin in self.admin_site._registry.items():
        # Only models belonging to this app section.
        if model._meta.app_label != self.app_name:
            continue
        perms = model_admin.get_model_perms(request)
        # Skip models the user has no permission at all for.
        if not any(perms.values()):
            continue
        models.append({
            'name': capfirst(model._meta.verbose_name_plural),
            # NOTE(review): ``_meta.module_name`` is the pre-Django-1.8
            # spelling of ``model_name``; this code targets old Django.
            'path': '%s/' % model._meta.module_name,
            'perms': perms,
        })
    models.sort(key=lambda x: x['name'])
    context = {
        'title': self.title,
        'models': models,
        'root_path': self.admin_site.root_path,
    }
    context.update(extra_context or {})
    # Fall back to the generic app_index template when no app-specific
    # template exists.
    return TemplateResponse(request, self.app_index_template or [
        'admin/%s/app_index.html' % self.app_name,
        'admin/app_index.html',
    ], context, current_app=self.admin_site.name)
def get_cql_models(app, keyspace=None):
    """
    :param app: django models module
    :param keyspace: database name (keyspace)
    :return: list of all cassandra.cqlengine.Model within app that should be
        synced to keyspace.
    """
    from cassandra.cqlengine.models import DEFAULT_KEYSPACE
    from .models import DjangoCassandraModel

    target = keyspace or DEFAULT_KEYSPACE
    wanted_types = (cqlengine.models.Model, DjangoCassandraModel)

    found = []
    for _name, member in inspect.getmembers(app):
        is_concrete = (inspect.isclass(member)
                       and issubclass(member, wanted_types)
                       and not member.__abstract__)
        if not is_concrete:
            continue
        # Models without an explicit keyspace belong to the default one.
        implicit = member.__keyspace__ is None and target == DEFAULT_KEYSPACE
        if implicit or member.__keyspace__ == target:
            found.append(member)
    return found
def menus(context):
    """Build the SIMPLEUI menu definition and return it as a <script> tag.

    If the SIMPLEUI_CONFIG setting provides a 'menus' entry it is used
    verbatim; otherwise the menu tree is derived from the admin
    ``app_list`` found in the template context.
    """
    data = []
    config = get_config('SIMPLEUI_CONFIG')
    if config and 'menus' in config:
        # An explicit menu configuration wins over the auto-generated one.
        data = config.get('menus')
    else:
        app_list = context.get('app_list')
        for app in app_list:
            models = []
            if app.get('models'):
                for m in app.get('models'):
                    models.append({
                        'name': str(m.get('name')),
                        'icon': get_icon(m.get('object_name')),
                        'url': m.get('admin_url'),
                        'addUrl': m.get('add_url'),
                        'breadcrumbs': [str(app.get('name')), str(m.get('name'))]
                    })
            module = {
                'name': str(app.get('name')),
                'icon': get_icon(app.get('app_label')),
                'models': models
            }
            data.append(module)
    # NOTE(review): json.dumps output is embedded unescaped inside a <script>
    # tag; a "</script>" sequence in any menu name would break the page.
    return '<script type="text/javascript">var menus={}</script>'.format(json.dumps(data))
def get_models(app_labels):
    """
    Gets a list of models for the given app labels, with some exceptions.
    TODO: If a required model is referenced, it should also be included.
    Or at least discovered with a get_or_create() call.
    """
    # NOTE(review): these imports are the pre-Django-1.9 model-loading API.
    from django.db.models import get_app, get_apps, get_model
    from django.db.models import get_models as get_all_models

    # These models are not to be output, e.g. because they can be generated automatically
    # TODO: This should be "appname.modelname" string
    from django.contrib.contenttypes.models import ContentType
    EXCLUDED_MODELS = (ContentType, )

    models = []

    # If no app labels are given, return all
    if not app_labels:
        for app in get_apps():
            models += [m for m in get_all_models(app) if m not in EXCLUDED_MODELS]

    # Get all relevant apps
    for app_label in app_labels:
        # If a specific model is mentioned, get only that model
        if "." in app_label:
            app_label, model_name = app_label.split(".", 1)
            # NOTE(review): no EXCLUDED_MODELS check here, and get_model()
            # returns None for unknown names, which would be appended as-is.
            models.append(get_model(app_label, model_name))
        # Get all models for a given app
        else:
            models += [m for m in get_all_models(get_app(app_label)) if m not in EXCLUDED_MODELS]
    return models
def get_cql_models(app, connection=None, keyspace=None):
    """
    :param app: django models module
    :param connection: connection name
    :param keyspace: keyspace
    :return: list of all cassandra.cqlengine.Model within app that should be
        synced to keyspace.
    """
    from .models import DjangoCassandraModel

    def _is_candidate(obj):
        # Concrete (non-abstract) cqlengine or DjangoCassandra model class.
        return (inspect.isclass(obj)
                and issubclass(obj, (cqlengine.models.Model,
                                     DjangoCassandraModel))
                and not obj.__abstract__)

    single = len(list(get_cassandra_connections())) == 1
    default_conn = connection == DEFAULT_DB_ALIAS or single

    result = []
    for _unused, obj in inspect.getmembers(app):
        if not _is_candidate(obj):
            continue
        if (obj.__connection__ == connection
                or (obj.__connection__ is None and default_conn)
                or (obj.__connection__ is None
                    and obj.__keyspace__ is not None
                    and obj.__keyspace__ == keyspace)):
            result.append(obj)
    return result
def UserModels(self):
    """Return a summary dict for every GntmModel the user holds shares in."""
    holdings = []
    for gntm_model in GntmModel.objects.all():
        share_count = self.getAktienOfModel(gntm_model.id)
        # Only models the user actually owns shares of.
        if share_count > 0:
            holdings.append({
                "name": gntm_model.FirstName,
                "aktien": share_count,
                "id": gntm_model.id,
            })
    return holdings
def get_drip_models():
    """Return the Drip model plus every model named in DRIP_MODEL_CLASSES."""
    from .models import Drip

    found = [Drip]
    for dotted in settings.DRIP_MODEL_CLASSES:
        app_label, model_name = dotted.split('.')
        found.append(apps.get_model(app_label=app_label, model_name=model_name))
    return found
def generate_js_foreach_model(appname, model_name, start, limit):
    """Generate per-model JS artifacts for one page of ContentType rows.

    Returns a dict with the generated entries under 'data' and the total
    row count under 'total'.
    """
    finish = int(start) + int(limit)
    queryset = get_models(model_name)
    total = queryset.count()
    models = []
    for ct in queryset[start:finish]:
        m = ct.model_class()
        try:
            # model_class() may return None for stale ContentTypes; the
            # attribute access then raises and the entry is skipped.
            if m.__module__:
                models.append({
                    'id': m.__module__ + '.' + m.__name__,
                    'name': m.__name__,
                    'model': generate_modeljs(appname, m),
                    'grid': generate_gridjs(appname, m),
                    'form': generate_formjs(appname, m),
                    'modellink': "files/model/{}/{}/{}.js".format(
                        appname, ct.app_label, m.__name__),
                    'formlink': "files/form/{}/{}/{}form.js".format(
                        appname, ct.app_label, m.__name__),
                    'gridlink': "files/grid/{}/{}/{}/list.js".format(
                        appname, ct.app_label, m.__name__),
                    'applink': "mvc/{}/{}/{}".format(
                        appname, ct.app_label, m.__name__),
                })
        except Exception:
            # Best-effort: skip models whose JS generation fails.  The
            # previous bare ``except`` also swallowed KeyboardInterrupt
            # and SystemExit, which it should not.
            continue
    return {'data': models, 'total': total}
def search_for_models(keywords):
    """Look up SketchupModel instances matching ``keywords``.

    Returns an empty list when no keywords are given.
    """
    if not keywords:
        return []
    # sleep(0.100) throttling between lookups was disabled in the original.
    return [
        SketchupModel.find_google_id(model_id)
        for model_id in search_by_keywords(keywords)
    ]
def presume_models_used_by_m2m(self, fks):
    """Guess which FK target models an m2m table actually links together.

    With two or fewer foreign keys every target is assumed relevant;
    otherwise the generic ``Link`` model is filtered out.

    Both branches now return a plain list: the original returned
    ``fks.values()`` directly, which is a view object on Python 3 and a
    list on Python 2, so the return type depended on the branch taken.
    """
    if len(fks) <= 2:
        return list(fks.values())
    return [model for model in fks.values() if model != mb_models.Link]
def remove_field_constraints(self, field, opts, models, refs):
    """Return SQL for removing constraints on a field.

    Args:
        field (django.db.models.Field):
            The field the constraints will be removed from.

        opts (django.db.models.options.Options):
            The Meta class for the model.

        models (list of django.db.models.Model):
            A caller-provided list that will be populated with models
            that constraints will be removed from.

        refs (dict):
            A caller-supplied dictionary that will be populated with
            references that are removed. The keys are models, and the
            values are lists of tuples of many-to-many models and fields.

    Returns:
        list:
        The list of SQL statements for removing constraints on the field.
    """
    sql = []
    # Only primary keys can be referenced by m2m through-table constraints,
    # and only backends that support constraints need any SQL at all.
    if self.supports_constraints and field.primary_key:
        for f in opts.local_many_to_many:
            remote_field = get_remote_field(f)
            if remote_field and remote_field.through:
                through = remote_field.through
                # Find through-model fields pointing back at this column.
                for m2m_f in through._meta.local_fields:
                    remote_m2m_f = get_remote_field(m2m_f)
                    if not remote_m2m_f:
                        continue
                    remote_m2m_f_model = \
                        get_remote_field_model(remote_m2m_f)
                    if (remote_m2m_f.field_name == field.column and
                        remote_m2m_f_model._meta.db_table == opts.db_table):
                        models.append(remote_m2m_f_model)
                        refs.setdefault(remote_m2m_f_model, []).append(
                            (through, m2m_f))
        # Copy so sql_delete_constraints can consume entries without
        # clobbering the caller's ``refs``.
        remove_refs = refs.copy()
        for relto in models:
            sql.extend(
                sql_delete_constraints(self.connection, relto, remove_refs))
    return sql
def rename_table(self, model, old_db_tablename, db_tablename):
    """Return SQL for renaming a model's database table.

    Constraints on m2m through models that reference the table are
    dropped before the rename and re-added afterwards (when the backend
    supports constraints), and the in-memory ``db_table`` of the
    affected through models is updated to the new (possibly truncated)
    name.
    """
    sql_result = SQLResult()
    if old_db_tablename == db_tablename:
        # No Operation
        return sql_result
    max_name_length = self.connection.ops.max_name_length()
    refs = {}
    models = []
    # Collect through models whose table is the one being renamed and
    # whose FK points back at this model.
    for field in model._meta.local_many_to_many:
        remote_field = get_remote_field(field)
        if (remote_field and remote_field.through and
            remote_field.through._meta.db_table == old_db_tablename):
            through = remote_field.through
            for m2m_field in through._meta.local_fields:
                remote_m2m_field = get_remote_field(m2m_field)
                if remote_m2m_field:
                    remote_m2m_field_model = get_remote_field_model(
                        remote_m2m_field)
                    if remote_m2m_field_model == model:
                        models.append(remote_m2m_field_model)
                        refs.setdefault(remote_m2m_field_model, []).append(
                            (through, m2m_field))
    remove_refs = refs.copy()
    if self.supports_constraints:
        # Constraints must be dropped before the rename.
        for relto in models:
            sql_result.add_pre_sql(
                sql_delete_constraints(self.connection, relto, remove_refs))
    sql_result.add(
        self.get_rename_table_sql(model, old_db_tablename, db_tablename))
    for relto in models:
        # Point the through models at the new table name.
        for rel_class, f in refs[relto]:
            if rel_class._meta.db_table == old_db_tablename:
                rel_class._meta.db_table = db_tablename
            rel_class._meta.db_table = \
                truncate_name(rel_class._meta.db_table, max_name_length)
        if self.supports_constraints:
            # ...and re-add the constraints afterwards.
            sql_result.add_post_sql(
                sql_add_constraints(self.connection, relto, refs))
    return sql_result
def create_session_models(self):
    """Build a model descriptor for each session-table date."""
    return [
        {'date': session_date,
         'model': self.create_session_model(session_date)}
        for session_date in self.create_dates()
    ]
def get_models_with_object_permissions():
    """
    Return a list of all models that inherit from `ObjectPermissionMixin`
    """
    return [
        model
        for model in apps.get_models()
        if issubclass(model, ObjectPermissionMixin)
    ]
def ready(self):
    """Collect admin-registered models flagged as menu entries."""
    from django.contrib import admin

    flagged = [
        model
        for model, admin_class in admin.site._registry.items()
        if getattr(admin_class.__class__, "is_menu_entrieable", False)
    ]
    # Sorted by string representation for a stable menu order.
    self.__class__.entrieable_models += sorted(flagged, key=str)
def get_app_model_classes(self):
    """
    Helper method that returns a list of model classes for the current app.
    """
    def _resolve(dotted_path):
        # "pkg.module.Class" -> the Class attribute of pkg.module.
        module_path, attr = dotted_path.rsplit('.', 1)
        return getattr(import_module(module_path), attr)

    return [_resolve(path) for path in self.models]
def get_all_models(cls):
    """Return [{'model': ..., 'page': ...}] for every configured page.

    Raises ValueError when no pages are configured.
    """
    entries = []
    for page in cls.objects.all():
        app_label, class_name = page.model_class.split(".")
        entries.append({
            "model": apps.get_model(app_label, class_name),
            "page": page,
        })
    if not entries:
        raise ValueError("No models set up, run manage.py loaddata pages")
    return entries
def registry(cls):
    """Create the models described in MODELS and return them as a list."""
    models = []
    for model_name, model_fields in MODELS.items():
        try:
            model = cls.registry_model(model_name, model_fields)
            models.append(model)
        except:
            # Log which model failed (Python 2 print statement; the
            # message text is user-facing Russian) before re-raising.
            print u'Ошибка при создании модели {}'.format(model_name)
            raise
    return models
def queue_models(models, context): """ Works an an appropriate ordering for the models. This isn't essential, but makes the script look nicer because more instances can be defined on their first try. """ # Max number of cycles allowed before we call it an infinite loop. MAX_CYCLES = 5 model_queue = [] number_remaining_models = len(models) allowed_cycles = MAX_CYCLES while number_remaining_models > 0: previous_number_remaining_models = number_remaining_models model = models.pop(0) # If the model is ready to be processed, add it to the list if check_dependencies(model, model_queue): model_class = ModelCode(model=model, context=context) model_queue.append(model_class) else: # Otherwise put the model back at the end of the list models.append(model) # Check for infinite loops. # This means there is a cyclic foreign key structure # That cannot be resolved by re-ordering number_remaining_models = len(models) if number_remaining_models == previous_number_remaining_models: allowed_cycles -= 1 if allowed_cycles <= 0: # Add the remaining models, but do not remove them from the model list missing_models = [ModelCode(model=m, context=context) for m in models] model_queue += missing_models # Replace the models with the model class objects # (sure, this is a little bit of hackery) models[:] = missing_models break else: allowed_cycles = MAX_CYCLES return model_queue
def order_by(self, *fields):
    """Return model instances sorted by the given field names.

    The first field is the primary sort key, matching Django's
    ``order_by`` semantics.  Because ``sorted`` is stable, the keys are
    applied from last to first; the original applied them first to
    last, which made the *last* field the primary key.
    """
    ordered = self.__wlist
    for field_name in reversed(fields):
        key = self.__key(field_name)
        ordered = sorted(ordered, key=lambda row: row[key])
    return [self.model(**row) for row in ordered]
def container_settings(self, request, object_id, extra_context=None):
    """Select the models that are used in this folder (container)."""
    obj = self.get_object(request, unquote(object_id))
    opts = self.model._meta
    if obj is None:
        raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
    if not self.has_change_permission(request, obj):
        raise PermissionDenied
    # Only Container subclasses have a registry to configure.
    if not issubclass(self.model, Container):
        url = reverse('admin:index')
        return HttpResponseRedirect(url)
    all = obj.__class__._registry
    if request.method == 'POST':
        # Python 2 idiom; dict.has_key was removed in Python 3.
        post_all = request.POST.has_key('all')
        if post_all:
            obj.iregister_all()
        else:
            # Collect the models whose 'ct_<key>' checkbox was submitted.
            models = []
            for key, value in all.items():
                ct = request.POST.get('ct_%s' % key, None)
                if ct is not None:
                    models.append(value)
            if not models:
                messages.add_message(request, messages.ERROR, _('At least one option must be marked'))
            else:
                obj.iunregister_all()
                obj.iregister(models)
        # NOTE(review): reconstructed from flattened source -- the exact
        # nesting of the success message/redirect is ambiguous; confirm
        # against the original file.
        messages.add_message(request, messages.INFO, _('The filter for this folder has been changed successfully'))
        return HttpResponseRedirect(obj.admin_url())
    action_name = _('%s configuration') % obj
    obj.load_registry()
    allowed = obj._registry
    context = {
        'object': obj,
        'action_name': action_name,
        'title': action_name,
        'all': all,
        'allowed': allowed,
        'use_all': all == allowed,
    }
    context_instance = template.RequestContext(request, current_app=self.admin_site.name)
    return render_to_response([
        "admin/cms/%s/%s/settings.html" % (opts.app_label, opts.object_name.lower()),
        "admin/cms/%s/settings.html" % opts.app_label,
        "admin/cms/settings.html"
    ], context, context_instance=context_instance)
def get_product_models(self):
    """Scrape the product-model table rows into name/content dicts."""
    scraped = []
    rows = self.driver.find_elements_by_css_selector(
        '#productModelListTable tbody tr')
    for row in rows:
        cells = row.find_elements_by_css_selector('td')
        scraped.append({
            'name': cells[0].text.strip(),
            'content': cells[1].text.strip(),
        })
    return scraped
def check_permissions(self, request):
    """Relay permission check to the view"""
    # Start with the primary model; the ?expand=.. parameter may pull in
    # related models via the type's complex elements, and those must pass
    # the permission check as well.
    checked = [self.model]
    checked.extend(
        element.type.source
        for element in self.xsd_type.complex_elements
        if element.type.source is not None
    )
    self.wfs_view.check_permissions(request, checked)
def import_tracked_models():
    """
    Import and return the models listed in settings.SEO_TRACKED_MODELS.

    Paths that fail to import are logged and skipped.
    """
    tracked = []
    for dotted_path in getattr(settings, 'SEO_TRACKED_MODELS', []):
        try:
            tracked.append(import_string(dotted_path))
        except ImportError:
            logging.warning("Failed to import model from path '%s'" % dotted_path)
    return tracked
def get_cql_models(app):
    """
    :param app: django models module
    :return: list of all cqlengine.Model within app
    """
    return [
        member
        for _name, member in inspect.getmembers(app)
        if inspect.isclass(member)
        and issubclass(member, cqlengine.Model)
        and not member.__abstract__
    ]
def get_models(self, model_names):
    """
    Given a model name or a list of model names, return a list of
    model instances.
    """
    models = []
    # Accept a single name as a convenience.
    if isinstance(model_names, str):
        model_names = [model_names]
    for app_label in settings.INSTALLED_APPS:
        for model_name in model_names:
            # NOTE(review): ``get_model`` here is the pre-1.9 Django
            # helper that returns None for unknown app/model pairs.
            model = get_model(app_label.split(".")[-1], model_name)
            if model is not None:
                models.append(model)
    return models
def __pull_from_db__(self):
    # Rebuild the registry from the comma-separated ContentType ids
    # stored in ``self.ct_allowed``.
    self._registry = {}
    ids = self.ct_allowed.split(',')
    models = []
    for id in ids:
        try:
            model = ContentType.objects.get(pk=id).model_class()
        except:
            # NOTE(review): bare except deliberately skips malformed or
            # missing ids (best-effort), but it also hides real errors.
            pass
        else:
            models.append(model)
    if models:
        self.iregister(models)
def check_permissions(self, request):
    """Perform the access check for a particular request.

    This retrieves the accessed models, and relays further processing
    in the view.
    """
    # The ?expand=.. parameter may pull in related models through the
    # type's complex elements; those are checked alongside the primary.
    expanded = [
        element.type.source
        for element in self.xsd_type.complex_elements
        if element.type.source is not None
    ]
    self.wfs_view.check_permissions(request, [self.model] + expanded)
def get_default_model(cls, module, modelname=None, kind=None):
    """Scan ``<module>.printmodel`` for print-model modules.

    Returns a list of ``(module_name, display_name)`` tuples, optionally
    filtered by ``modelname`` and/or ``kind``.
    """
    models = []
    from django.utils.module_loading import import_module
    try:
        dir_pack = dirname(import_module("%s.printmodel" % module).__file__)
        for _dir, _dirs, filenames in walk(dir_pack):
            for filename in filenames:
                # Only regular python modules; skip __init__.py and other
                # underscore-prefixed files.
                if (filename[-3:] == ".py") and not filename.startswith('_'):
                    mod_name = filename[:-3]
                    print_mod = import_module("%s.printmodel.%s" % (module, mod_name))
                    # A None filter matches everything.
                    if ((modelname is None) or (getattr(print_mod, "modelname") == modelname)) and ((kind is None) or (getattr(print_mod, "kind") == kind)):
                        models.append((mod_name, getattr(print_mod, "name")))
    except ImportError:
        # The package has no printmodel sub-package: no models available.
        pass
    return models
def test_generate_named_url(common_model_name_not_unique_class_mock,
                            common_model_class_mock, settings_mock):
    """
    graph topology:
          model_1
          |     \
     fk_a |      \ fk_b
          |       \
          v        v
      model_2_1  model_2_2
          |
          | fk_a
          v
       model_3
    """
    models = []
    model_3 = common_model_class_mock('model_3')
    models.append(model_3)
    model_2_1 = common_model_name_not_unique_class_mock(
        ('name', 'fk_a', 'str_with_choices_a'), model_3, model_3, 'model_2_1',
    )
    models.append(model_2_1)
    model_2_2 = common_model_class_mock('model_2_2')
    models.append(model_2_2)
    model_1 = common_model_name_not_unique_class_mock(
        ('name', 'fk_a', 'fk_b'), model_2_1, model_2_2, 'model_1',
    )
    models.append(model_1)
    # Graph generation must not depend on the order models are supplied in.
    random.shuffle(models)
    with mock.patch('awx.main.utils.named_url_graph.settings', settings_mock):
        generate_graph(models)
    obj_3 = model_3(name='cat')
    obj_2_2 = model_2_2(name='dog')
    obj_2_1 = model_2_1(name='bob', str_with_choices_a='foo', fk_a=obj_3)
    obj_1 = model_1(name='alice', fk_a=obj_2_1, fk_b=obj_2_2)
    # Each missing FK contributes an empty segment between the '+' separators.
    obj_1.fk_b = None
    assert settings_mock.NAMED_URL_GRAPH[model_1].generate_named_url(
        obj_1) == 'alice++bob+foo++cat++'
    obj_1.fk_b = obj_2_2
    assert settings_mock.NAMED_URL_GRAPH[model_1].generate_named_url(
        obj_1) == 'alice++bob+foo++cat++dog'
    obj_2_1.fk_a = None
    assert settings_mock.NAMED_URL_GRAPH[model_1].generate_named_url(
        obj_1) == 'alice++bob+foo++++dog'
    obj_1.fk_a = None
    assert settings_mock.NAMED_URL_GRAPH[model_1].generate_named_url(
        obj_1) == 'alice++++dog'
    obj_1.fk_b = None
    assert settings_mock.NAMED_URL_GRAPH[model_1].generate_named_url(
        obj_1) == 'alice++++'
def models_for_tag(self):
    """
    Return the unique set of model names, all of which have had
    instances tagged with this tag.

    @todo: This is *really* hacky. Can we do it more elegantly?
    """
    models = []
    for attribute in dir(self):
        if attribute.endswith('_set'):
            # Strip the '_set' suffix to recover the related model name.
            # (The original used attribute.split('_')[0], which truncated
            # names containing underscores, e.g. 'news_item_set' -> 'news'.)
            models.append(attribute[:-4])
    # set() deduplicates; note the result is unordered.
    return set(models)
def queue_models(models, context): """ Works an an appropriate ordering for the models. This isn't essential, but makes the script look nicer because more instances can be defined on their first try. """ # Max number of cycles allowed before we call it an infinite loop. MAX_CYCLES = 5 model_queue = [] number_remaining_models = len(models) allowed_cycles = MAX_CYCLES while number_remaining_models > 0: previous_number_remaining_models = number_remaining_models model = models.pop(0) # If the model is ready to be processed, add it to the list if check_dependencies(model, model_queue): model_class = ModelCode(model=model, context=context) model_queue.append(model_class) # Otherwise put the model back at the end of the list else: models.append(model) # Check for infinite loops. # This means there is a cyclic foreign key structure # That cannot be resolved by re-ordering number_remaining_models = len(models) if number_remaining_models == previous_number_remaining_models: allowed_cycles -= 1 if allowed_cycles <= 0: # Add the remaining models, but do not remove them from the model list missing_models = [ ModelCode(model=m, context=context) for m in models ] model_queue += missing_models # Replace the models with the model class objects # (sure, this is a little bit of hackery) models[:] = missing_models break else: allowed_cycles = MAX_CYCLES return model_queue
def log_search_request(last_search_value):
    """Persist one SearchRequestLog row per search term (bulk insert)."""
    def clean_term(term):
        """ Return a tuple of the non-normalized and normalized term. """
        terms = term.strip().lower().split()
        nn_term = u' '.join(terms)
        n_terms = []
        # normalization (morphological analysis is currently disabled)
        for t in terms:
            n_term = t  # morph.normalize(t.upper())
            if isinstance(n_term, set):
                n_terms.append(n_term.pop().lower())
            elif isinstance(n_term, unicode):
                n_terms.append(n_term.lower())
        n_term = u' '.join(n_terms)
        return (nn_term, n_term)

    # One id groups all terms of a single search request together.
    search_request_id = uuid.uuid4().hex
    term_groups = []
    for part in last_search_value:
        term = part.get('value', None)
        if term:
            forms = clean_term(term)
            term_groups.append({
                'nn': forms[0],
                'n': forms[1],
                # 'attr' looks like '<use>_<suffix>'; drop the last segment.
                'use': u'_'.join(part.get('attr', u'not defined').split('_')[:-1])
            })
    models = []
    for group in term_groups:
        srl = SearchRequestLog(
            catalog=u'*',
            search_id=search_request_id,
            use=group['use'],
            normalize=group['n'],
            not_normalize=group['nn'],
        )
        models.append(srl)
    SearchRequestLog.objects.bulk_create(models)
def _test_split_input_on_n_shards(self, shards):
    """Split the input over ``shards`` readers and check the shards
    collectively cover every entity exactly once."""
    from mapreduce.model import MapperSpec
    mapper_spec = MapperSpec(
        '', '',
        {'input_reader': {
            'model': 'mapreduce.MRTestNode'
        }},
        shards,
    )
    readers = DjangoInputReader.split_input(mapper_spec)
    self.assertEqual(len(readers), shards)
    models = []
    for reader in readers:
        for model in reader:
            models.append(model.pk)
    # Full coverage of the entities...
    self.assertEqual(len(models), self.ENTITY_COUNT)
    # ...with no duplicates across shards.
    self.assertEqual(len(models), len(set(models)))
def create_models_from_components(component_node, model_parameters, models=None, **kwargs):
    """
    recursively look through mppt components to recreate that hierarchy in models
    :param component_node:
    :param model_parameters:
    :param models: output list; a fresh list is created when omitted
    :return: None
    """
    # The original default was a mutable ``[]`` shared across *all* calls,
    # so models from earlier invocations leaked into later ones.
    if models is None:
        models = []
    title = model_parameters["title"]
    # NOTE(review): assumes ``title`` already contains " : "; otherwise
    # ``index`` raises ValueError -- confirm against callers.
    model_parameters["title"] = title[:title.index(" : ")] + " : " + component_node.name
    model_parameters["component_key"] = component_node.get_key()
    model_parameters["is_root"] = kwargs.pop("is_root", False)
    with allow_unsaved_fk(MetadataModel, ["parent", ]):
        model = MetadataModel(**model_parameters)
    models.append(model)
    for child_component in component_node.get_children():
        model_parameters["parent"] = model
        create_models_from_components(child_component, model_parameters, models)
def get_cql_models(app, keyspace=None):
    """
    :param app: django models module
    :param keyspace: database name (keyspace)
    :return: list of all cassandra.cqlengine.Model within app that should be
        synced to keyspace.
    """
    from cassandra.cqlengine.models import DEFAULT_KEYSPACE

    target = keyspace or DEFAULT_KEYSPACE

    def _belongs(model_cls):
        # Models with no explicit keyspace belong to the default keyspace.
        if model_cls.__keyspace__ is None:
            return target == DEFAULT_KEYSPACE
        return model_cls.__keyspace__ == target

    return [
        member
        for _name, member in inspect.getmembers(app)
        if inspect.isclass(member)
        and issubclass(member, cqlengine.models.Model)
        and not member.__abstract__
        and _belongs(member)
    ]
def test_populate_named_url_invalid_query_kwargs(
        common_model_name_not_unique_class_mock, common_model_class_mock,
        settings_mock, input_):
    """Invalid named-URL identifiers must not populate query kwargs."""
    models = []
    model_2 = common_model_class_mock('model_2')
    models.append(model_2)
    model_1 = common_model_name_not_unique_class_mock(
        ('name', 'fk_a', 'str_with_choices_a'), model_2, model_2, 'model_1',
    )
    models.append(model_1)
    # Graph generation must not depend on the order models are supplied in.
    random.shuffle(models)
    with mock.patch('awx.main.utils.named_url_graph.settings', settings_mock):
        generate_graph(models)
    kwargs = {}
    # populate_named_url_query_kwargs returns falsy for malformed input_.
    assert not settings_mock.NAMED_URL_GRAPH[
        model_1].populate_named_url_query_kwargs(kwargs, input_)
def add_app(app_label: str, prefix: str = ""):
    """
    Iterate through each model in the app and pass it to node type creators.
    """
    from django.apps import apps

    # Standard models (Pages, Images, Documents) for the requested app...
    app_config = apps.get_app_config(app_label)
    collected = list(app_config.get_models())

    # ...plus the snippet models belonging to the same app.
    collected.extend(
        snippet
        for snippet in get_snippet_models()
        if snippet._meta.app_label == app_label
    )

    # Register each model in the correct section of the registry.
    for model in collected:
        register_model(model, prefix)
def add_app(app: str, prefix: str = ""):
    """
    Iterate through each model in the app and pass it to node type creators.
    """
    # Resolve concrete model classes for every ContentType in the app...
    collected = [
        content_type.model_class()
        for content_type in ContentType.objects.filter(app_label=app).all()
    ]
    # ...plus the snippet models belonging to the same app.
    collected += [
        snippet
        for snippet in get_snippet_models()
        if snippet._meta.app_label == app
    ]
    # Register each model in the correct section of the registry.
    for model in collected:
        register_model(model, prefix)
def filter(self, *args, **kwargs):
    """Return model instances whose fields match all given kwargs.

    Primary-key criteria are coerced to int; comparison is done on the
    string form of both sides.  With no kwargs, every row matches.
    """
    matched_rows = []
    for row in self.__wlist:
        criteria = {}
        for field, wanted in kwargs.items():
            key = self.__key(field)
            criteria[key] = int(wanted) if self.__ispk(key) else wanted
        if all(key in row and str(row[key]) == str(wanted)
               for key, wanted in criteria.items()):
            matched_rows.append(row)
    return [self.model(**row) for row in matched_rows]
def pull(self, sync_log = None):
    """Collect the models and objects changed since the last sync.

    When ``sync_log`` is None this is a first sync and everything is
    shipped; otherwise only objects whose ``update_at`` falls inside the
    (last_sync, new_sync) window are included.  Returns a dict with
    'models', 'objects' and a new 'sync_log' entry.
    """
    retorno = {}
    first = sync_log == None
    new_sync = datetime.datetime.now()
    if not first:
        last_sync = datetime.datetime(*time.strptime(sync_log['fields']['synced_at'], '%Y-%m-%d %H:%M:%S')[:6])
    # Let's look at the models.
    # TODO: improve this poking into SyncData
    models = []
    for app in self._registry.values():
        for model in app.keys():
            # On incremental syncs keep only models with changes in the window.
            if not first:
                model_type = ContentType.objects.get_for_model(model)
                sd = SyncData.objects.filter(content_type__pk = model_type.id, update_at__gt=last_sync).filter(update_at__lt=new_sync)
                if bool(sd):
                    models.append(model)
            else:
                models.append(model)
    models = get_model_order(models)
    retorno['models'] = map(lambda m: str(m._meta), models)
    retorno['objects'] = []
    for model in models:
        remote = self._registry[model._meta.app_label][model]
        remote_manager = remote._default_manager
        # On a first sync ship everything; otherwise only the change window.
        if first:
            retorno['objects'].extend(remote_manager.all())
        else:
            retorno['objects'].extend(remote_manager.filter(update_at__gt = last_sync, update_at__lt = new_sync))
    retorno['sync_log'] = {}
    retorno['sync_log']['synced_at'] = new_sync.strftime("%Y-%m-%d %H:%M:%S")
    retorno['sync_log']['sync_id'] = random_string(32)
    return retorno
def search_by_keywords(keywords, create_models=False):
    """
    Search the API for the keywords, return a list of model_ids.

    When ``create_models`` is True, SketchupModel instances are built,
    saved and returned instead of the raw ids.
    """
    params = {
        "startRow": 1,
        "endRow": NUMBER_OF_RESULTS,
        "q": keywords,
        "type": "SKETCHUP_MODEL",
        "class": "entity",
        "Lk": True,
    }
    json_data = api_get("Search", **params)
    if create_models:
        models = []
        for entry in json_data["entries"]:
            try:
                model = SketchupModel()
                _parse_model_entry(model, entry)
                model.save()
                models.append(model)
            except KeyError as exception:
                # Malformed entries are printed and skipped
                # (Python 2 print statement).
                print exception
        return models
    else:
        return [entry["id"] for entry in json_data["entries"]]
def get_new_realization_set(project=None, ontology=None, model_proxy=None, standard_property_proxies=[], scientific_property_proxies=[], model_customizer=None, vocabularies=[]):
    """Build an unsaved realization set: models for every vocabulary
    component plus their standard and scientific properties.

    Returns a dict with keys "models", "standard_properties" and
    "scientific_properties"; the property dicts are keyed by each
    model's unique model key.

    NOTE(review): the list-typed defaults are mutable default arguments;
    the visible code only iterates them, but callers should still pass
    fresh lists.
    """
    models = []
    model_parameters = {
        "project": project,
        'version': ontology,
        "proxy": model_proxy,
    }
    if model_customizer.model_show_hierarchy or len(vocabularies) == 0:
        # setup the root model...
        model = MetadataModel(**model_parameters)
        model.vocabulary_key = DEFAULT_VOCABULARY_KEY
        model.component_key = DEFAULT_COMPONENT_KEY
        model.title = model_customizer.model_root_component
        model.is_root = True
        models.append(model)
    for vocabulary in vocabularies:
        # With a hierarchy, vocabulary models hang off the root model.
        if model_customizer.model_show_hierarchy:
            model_parameters["parent"] = model
        else:
            model_parameters.pop("parent", None)
        model_parameters["vocabulary_key"] = vocabulary.get_key()
        components = vocabulary.component_proxies.all()
        if components:
            # recursively go through the components of each vocabulary,
            # adding corresponding models to the list
            root_component = components[0].get_root()
            model_parameters["title"] = u"%s : %s" % (vocabulary.name, root_component.name)
            create_models_from_components(
                root_component, model_parameters, models,
                # is_root will be False in all instances except the 1st time
                # this is called for a component w/ no hierarchy
                is_root=not model_customizer.model_show_hierarchy,
            )
    standard_properties = {}
    scientific_properties = {}
    for i, model in enumerate(models):
        model.reset()
        property_key = model.get_model_key()
        # since this is _not_ being created in the context of a subform,
        # each model in models corresponds to a separate component and will
        # therefore have a unique key
        standard_properties[property_key] = []
        for standard_property_proxy in standard_property_proxies:
            with allow_unsaved_fk(MetadataStandardProperty, ["model", ]):
                standard_property = MetadataStandardProperty(proxy=standard_property_proxy, model=model)
                standard_property.reset()
                standard_properties[property_key].append(standard_property)
        scientific_properties[property_key] = []
        try:
            for scientific_property_proxy in scientific_property_proxies[property_key]:
                with allow_unsaved_fk(MetadataScientificProperty, ["model", ]):
                    scientific_property = MetadataScientificProperty(proxy=scientific_property_proxy, model=model)
                    scientific_property.reset()
                    scientific_properties[property_key].append(scientific_property)
        except KeyError:
            # there were no scientific properties associated w/ this component
            # (or, rather, no components associated w/ this vocabulary) --
            # that's okay
            scientific_properties[property_key] = []
    realization_set = {
        "models": models,
        "standard_properties": standard_properties,
        "scientific_properties": scientific_properties,
    }
    return realization_set
def handle(self, **options):
    """
    Import the worksheets of one or more Excel workbooks into the database.

    Each worksheet is matched to a model by its name, permission-checked,
    ordered by inter-model dependencies and then parsed row by row inside
    a single transaction per workbook.  Progress and the final result are
    tracked on a Task record.
    """
    # Pick up the options
    now = datetime.now()
    self.database = options['database']
    if self.database not in settings.DATABASES:
        raise CommandError("No database settings known for '%s'" % self.database)
    if options['user']:
        try:
            self.user = User.objects.all().using(self.database).get(username=options['user'])
        except Exception:
            # BUGFIX: was a bare "except:", which also hid system exits
            raise CommandError("User '%s' not found" % options['user'])
    else:
        self.user = None

    timestamp = now.strftime("%Y%m%d%H%M%S")
    # NOTE(review): this log file name is computed but never opened below;
    # kept for backward compatibility - confirm whether logging is intended.
    if self.database == DEFAULT_DB_ALIAS:
        logfile = 'importworkbook-%s.log' % timestamp
    else:
        logfile = 'importworkbook_%s-%s.log' % (self.database, timestamp)

    task = None
    try:
        setattr(_thread_locals, 'database', self.database)
        # Initialize the task
        if options['task']:
            try:
                task = Task.objects.all().using(self.database).get(pk=options['task'])
            except Exception:
                raise CommandError("Task identifier not found")
            if task.started or task.finished or task.status != "Waiting" or task.name not in ('frepple_importworkbook', 'importworkbook'):
                raise CommandError("Invalid task identifier")
            task.status = '0%'
            task.started = now
        else:
            task = Task(name='importworkbook', submitted=now, started=now, status='0%', user=self.user)
        task.arguments = ' '.join(options['file'])
        task.save(using=self.database)

        all_models = [(ct.model_class(), ct.pk) for ct in ContentType.objects.all() if ct.model_class()]
        try:
            with transaction.atomic(using=self.database):
                # BUGFIX: the original iterated over the undefined name
                # "filename" (NameError); the workbooks are in options['file'].
                for file in options['file']:
                    wb = load_workbook(filename=file, read_only=True, data_only=True)
                    # Find all models in the workbook
                    models = []
                    for ws_name in wb.sheetnames:
                        # Find the model matching the worksheet name
                        model = None
                        contenttype_id = None
                        for m, ct in all_models:
                            if matchesModelName(ws_name, m):
                                model = m
                                contenttype_id = ct
                                break
                        if not model or model in EXCLUDE_FROM_BULK_OPERATIONS:
                            print(force_text(_("Ignoring data in worksheet: %s") % ws_name))
                        elif self.user and not self.user.has_perm('%s.%s' % (model._meta.app_label, get_permission_codename('add', model._meta))):
                            # Check permissions.
                            # BUGFIX: guard the check with "self.user and" -
                            # the original crashed with AttributeError when
                            # the command ran without a user.
                            print(force_text(_("You don't have permissions to add: %s") % ws_name))
                        else:
                            deps = set([model])
                            GridReport.dependent_models(model, deps)
                            models.append((ws_name, model, contenttype_id, deps))

                    # Sort the list of models, based on dependencies between models
                    models = GridReport.sort_models(models)

                    # Process all rows in each worksheet
                    for ws_name, model, contenttype_id, dependencies in models:
                        print(force_text(_("Processing data in worksheet: %s") % ws_name))
                        numerrors = 0
                        numwarnings = 0
                        firsterror = True
                        ws = wb[ws_name]
                        for error in parseExcelWorksheet(model, ws, user=self.user, database=self.database, ping=True):
                            if error[0] == DEBUG:
                                # Progress ping - nothing to report
                                continue
                            if firsterror and error[0] in (ERROR, WARNING):
                                # Print the header line once, before the first issue
                                print('%s %s %s %s %s%s%s' % (
                                    capfirst(_("worksheet")), capfirst(_("row")),
                                    capfirst(_("field")), capfirst(_("value")),
                                    capfirst(_("error")), " / ", capfirst(_("warning"))
                                ))
                                firsterror = False
                            if error[0] == ERROR:
                                print('%s %s %s %s %s: %s' % (
                                    ws_name,
                                    error[1] if error[1] else '',
                                    error[2] if error[2] else '',
                                    error[3] if error[3] else '',
                                    capfirst(_('error')),
                                    error[4]
                                ))
                                numerrors += 1
                            elif error[0] == WARNING:
                                # BUGFIX: the original tested error[1] here,
                                # so warnings fell through to the generic
                                # branch below and were never counted.
                                print('%s %s %s %s %s: %s' % (
                                    ws_name,
                                    error[1] if error[1] else '',
                                    error[2] if error[2] else '',
                                    error[3] if error[3] else '',
                                    capfirst(_('warning')),
                                    error[4]
                                ))
                                numwarnings += 1
                            else:
                                print('%s %s %s %s %s %s' % (
                                    "danger" if numerrors > 0 else 'success',
                                    ws_name,
                                    error[1] if error[1] else '',
                                    error[2] if error[2] else '',
                                    error[3] if error[3] else '',
                                    error[4]
                                ))
            print('%s' % _("Done"))
        except GeneratorExit:
            logger.warning('Connection Aborted')
    except Exception as e:
        if task:
            task.status = 'Failed'
            task.message = '%s' % e
            task.finished = datetime.now()
        raise e
    finally:
        setattr(_thread_locals, 'database', None)
        if task:
            task.save(using=self.database)
    # BUGFIX: returning from inside "finally" would have silently swallowed
    # the exception re-raised above; return after the try statement instead.
    return _("Done")
def handle(self, *attrs, **options):
    """
    Create random instances of the models named on the command line.

    Positional arguments have the form ``app_label.ModelName:count``.
    The options control how foreign-key and many-to-many relations are
    followed (re-used) or generated, either globally ("min:max") or per
    field ("field:min:max,field2:min:max").
    """
    error_option = None
    #
    # follow options
    #
    if options["no_follow_fk"] is None:
        follow_fk = None
    else:
        follow_fk = False
    if options["no_follow_m2m"] is None:
        follow_m2m = None
        # this is the only chance for the follow_m2m options to be parsed
        if options["follow_m2m"]:
            try:
                value = options["follow_m2m"].split(",")
                if len(value) == 1 and value[0].count(":") == 1:
                    # a single "min:max" pair applies to all m2m fields
                    follow_m2m = [int(i) for i in value[0].split(":")]
                else:
                    # otherwise expect one "field:min:max" spec per field
                    follow_m2m = {}
                    for field in value:
                        key, minval, maxval = field.split(":")
                        follow_m2m[key] = int(minval), int(maxval)
            except ValueError:
                # remember the malformed option; reported once below
                error_option = "--follow-m2m={0}".format(options["follow_m2m"])
    else:
        follow_m2m = False
    #
    # generation options
    #
    if options["generate_fk"] is None:
        generate_fk = None
    else:
        generate_fk = options["generate_fk"].split(",")
    generate_m2m = None
    if options["generate_m2m"]:
        try:
            # drop empty segments such as trailing commas
            value = [v for v in options["generate_m2m"].split(",") if v]
            if len(value) == 1 and value[0].count(":") == 1:
                generate_m2m = [int(i) for i in value[0].split(":")]
            else:
                generate_m2m = {}
                for field in value:
                    key, minval, maxval = field.split(":")
                    generate_m2m[key] = int(minval), int(maxval)
        except ValueError:
            error_option = "--generate-m2m={0}".format(options["generate_m2m"])
    if error_option:
        raise CommandError(
            u"Invalid option {0}\nExpected: {1}=field:min:max,field2:min:max... (min and max must be numbers)".format(
                error_option,
                error_option.split("=", 1)[0]
            )
        )
    # --use: dotted path to the AutoFixture subclass to instantiate
    use = options["use"]
    if use:
        use = use.split(".")
        use = getattr(importlib.import_module(".".join(use[:-1])), use[-1])
    overwrite_defaults = options["overwrite_defaults"]
    self.verbosity = int(options["verbosity"])
    # parse the positional "app_label.ModelName:count" arguments
    models = []
    for attr in attrs:
        try:
            app_label, model_label = attr.split(".")
            model_label, count = model_label.split(":")
            count = int(count)
        except ValueError:
            raise CommandError(
                u"Invalid argument: {0}\nExpected: app_label.ModelName:count (count must be a number)".format(attr)
            )
        model = get_model(app_label, model_label)
        if not model:
            raise CommandError(u"Unknown model: {0}.{1}".format(app_label, model_label))
        models.append((model, count))
    # echo every created instance, then let registered fixtures be discovered
    signals.instance_created.connect(self.print_instance)
    autofixture.autodiscover()
    kwargs = {
        "overwrite_defaults": overwrite_defaults,
        "follow_fk": follow_fk,
        "generate_fk": generate_fk,
        "follow_m2m": follow_m2m,
        "generate_m2m": generate_m2m,
    }
    for model, count in models:
        if use:
            fixture = use(model, **kwargs)
            fixture.create(count)
        else:
            autofixture.create(model, count, **kwargs)
def handle(self, *args, **options):
    """
    Bulk-load CSV files found in the database's upload folder.

    Every ``*.csv`` file whose base name matches a model name is
    permission-checked, ordered by inter-model dependencies and parsed
    with parseCSVloadfromfolder.  The run is tracked on a Task record.
    """
    # Pick up the options
    if 'database' in options:
        self.database = options['database'] or DEFAULT_DB_ALIAS
    else:
        self.database = DEFAULT_DB_ALIAS
    if self.database not in settings.DATABASES:
        raise CommandError("No database settings known for '%s'" % self.database)
    if 'user' in options and options['user']:
        try:
            self.user = User.objects.all().using(self.database).get(username=options['user'])
        except Exception:
            # BUGFIX: was a bare "except:", which also hid system exits
            raise CommandError("User '%s' not found" % options['user'])
    else:
        self.user = None

    now = datetime.now()
    task = None
    try:
        # Initialize the task
        if 'task' in options and options['task']:
            try:
                task = Task.objects.all().using(self.database).get(pk=options['task'])
            except Exception:
                raise CommandError("Task identifier not found")
            if task.started or task.finished or task.status != "Waiting" or task.name != 'load from folder':
                raise CommandError("Invalid task identifier")
            task.status = '0%'
            task.started = now
        else:
            task = Task(name='load from folder', submitted=now, started=now, status='0%', user=self.user)
        task.arguments = ' '.join(['"%s"' % i for i in args])
        task.save(using=self.database)

        # Choose the right self.delimiter and language
        self.delimiter = get_format('DECIMAL_SEPARATOR', settings.LANGUAGE_CODE, True) == ',' and ';' or ','
        translation.activate(settings.LANGUAGE_CODE)

        # Collect the CSV files present in the upload folder
        filestoupload = list()
        thisfolder = None
        if os.path.isdir(settings.DATABASES[self.database]['FILEUPLOADFOLDER']):
            thisfolder = settings.DATABASES[self.database]['FILEUPLOADFOLDER']
            for fileindir in os.listdir(thisfolder):
                if fileindir.endswith('.csv'):
                    filestoupload.append(fileindir)

        # Match every file to a model by its base name
        all_models = [(ct.model_class(), ct.pk) for ct in ContentType.objects.all() if ct.model_class()]
        models = []
        for ifile in filestoupload:
            filename0 = ifile.split('.')[0]
            model = None
            contenttype_id = None
            for m, ct in all_models:
                if filename0.lower() in (m._meta.model_name.lower(), m._meta.verbose_name.lower(), m._meta.verbose_name_plural.lower()):
                    model = m
                    contenttype_id = ct
                    break
            if not model or model in EXCLUDE_FROM_BULK_OPERATIONS:
                print("Ignoring data in file: %s" % ifile)
            elif self.user is not None and not self.user.has_perm('%s.%s' % (model._meta.app_label, get_permission_codename('add', model._meta))):
                # Check permissions
                print("You don't have permissions to add: %s" % ifile)
            else:
                deps = set([model])
                GridReport.dependent_models(model, deps)
                models.append((ifile, model, contenttype_id, deps))

        # Sort the list of models, based on dependencies between models
        cnt = len(models)
        ok = False
        while not ok:
            ok = True
            for i in range(cnt):
                for j in range(i + 1, cnt):
                    # BUGFIX: each dependency set contains the model itself,
                    # so without the inequality guard two files mapping to the
                    # same model kept swapping forever (the sibling
                    # implementation of this command already has this guard).
                    if models[i][1] != models[j][1] and models[i][1] in models[j][3]:
                        # A subsequent entry depends on this one: move this
                        # entry to the end of the list and rescan.
                        models.append(models.pop(i))
                        ok = False

        # Parse the files in dependency order.
        # NOTE: the original repeated the "add" permission check here and
        # returned mid-command on failure; that check was provably dead
        # (such files never reach this loop - they are filtered out during
        # the matching phase above), so it has been removed.
        for ifile, model, contenttype_id, dependencies in models:
            print("Processing data in file: %s" % ifile)
            filetoparse = os.path.join(os.path.abspath(thisfolder), ifile)
            self.parseCSVloadfromfolder(model, filetoparse)

        # Task update
        task.status = 'Done'
        task.finished = datetime.now()
    except Exception as e:
        if task:
            task.status = 'Failed'
            task.message = '%s' % e
            task.finished = datetime.now()
        raise e
    finally:
        if task:
            task.save(using=self.database)
def handle(self, *args, **options):
    """
    Bulk-load CSV files from the database's upload folder, with logging.

    Every ``*.csv`` file whose base name matches a model name is
    permission-checked, ordered by inter-model dependencies and parsed.
    Progress, errors and completion are written both to a Task record and
    to a ``loadfromfolder.log`` file inside the upload folder.
    """
    # Pick up the options
    if 'database' in options:
        self.database = options['database'] or DEFAULT_DB_ALIAS
    else:
        self.database = DEFAULT_DB_ALIAS
    if self.database not in settings.DATABASES:
        raise CommandError("No database settings known for '%s'" % self.database )
    if 'user' in options and options['user']:
        try:
            self.user = User.objects.all().using(self.database).get(username=options['user'])
        except:
            # NOTE(review): bare except - also catches KeyboardInterrupt etc.
            raise CommandError("User '%s' not found" % options['user'] )
    else:
        self.user = None
    now = datetime.now()
    task = None
    self.logfile = None
    try:
        # Initialize the task
        if 'task' in options and options['task']:
            try:
                task = Task.objects.all().using(self.database).get(pk=options['task'])
            except:
                raise CommandError("Task identifier not found")
            if task.started or task.finished or task.status != "Waiting" or task.name != 'load from folder':
                raise CommandError("Invalid task identifier")
            task.status = '0%'
            task.started = now
        else:
            task = Task(name='load from folder', submitted=now, started=now, status='0%', user=self.user)
        task.arguments = ' '.join(['"%s"' % i for i in args])
        task.save(using=self.database)
        # Choose the right self.delimiter and language
        self.delimiter = get_format('DECIMAL_SEPARATOR', settings.LANGUAGE_CODE, True) == ',' and ';' or ','
        translation.activate(settings.LANGUAGE_CODE)
        # Execute
        if os.path.isdir(settings.DATABASES[self.database]['FILEUPLOADFOLDER']):
            # Open the logfile
            self.logfile = open(os.path.join(settings.DATABASES[self.database]['FILEUPLOADFOLDER'], 'loadfromfolder.log'), "a")
            print("%s Started upload from folder\n" % datetime.now(), file=self.logfile)
            all_models = [ (ct.model_class(), ct.pk) for ct in ContentType.objects.all() if ct.model_class() ]
            # Match every *.csv file in the folder to a model by its base name
            models = []
            for ifile in os.listdir(settings.DATABASES[self.database]['FILEUPLOADFOLDER']):
                if not ifile.endswith('.csv'):
                    continue
                filename0 = ifile.split('.')[0]
                model = None
                contenttype_id = None
                for m, ct in all_models:
                    if filename0.lower() in (m._meta.model_name.lower(), m._meta.verbose_name.lower(), m._meta.verbose_name_plural.lower()):
                        model = m
                        contenttype_id = ct
                        print("%s Matched a model to file: %s" % (datetime.now(),ifile), file=self.logfile)
                        break
                if not model or model in EXCLUDE_FROM_BULK_OPERATIONS:
                    print("%s Ignoring data in file: %s" % (datetime.now(),ifile), file=self.logfile)
                elif self.user and not self.user.has_perm('%s.%s' % (model._meta.app_label, get_permission_codename('add', model._meta))):
                    # Check permissions
                    print("%s You don't have permissions to add: %s" % (datetime.now(),ifile), file=self.logfile)
                else:
                    deps = set([model])
                    GridReport.dependent_models(model, deps)
                    models.append( (ifile, model, contenttype_id, deps) )
            # Sort the list of models, based on dependencies between models
            cnt = len(models)
            ok = False
            while not ok:
                ok = True
                for i in range(cnt):
                    for j in range(i + 1, cnt):
                        # The inequality check avoids endless swapping when two
                        # files map to the same model (deps contains the model itself)
                        if models[i][1] != models[j][1] and models[i][1] in models[j][3]:
                            # A subsequent model i depends on model i. The list ordering is
                            # thus not ok yet. We move this element to the end of the list.
                            models.append(models.pop(i))
                            ok = False
            task.status = '10%'
            task.save(using=self.database)
            # Parse the files in dependency order, updating task progress 10%..90%
            i=0
            errors = 0
            for ifile, model, contenttype_id, dependencies in models:
                i += 1
                print("%s Started processing data in file: %s" % (datetime.now(),ifile), file=self.logfile)
                filetoparse=os.path.join(os.path.abspath(settings.DATABASES[self.database]['FILEUPLOADFOLDER']), ifile)
                errors += self.parseCSVloadfromfolder(model, filetoparse)
                print("%s Finished processing data in file: %s\n" % (datetime.now(),ifile), file=self.logfile)
                task.status = str(int(10+i/cnt*80))+'%'
                task.save(using=self.database)
            # Task update
            if errors:
                task.status = 'Failed'
                task.message = "Uploaded %s data files with %s errors" % (cnt, errors)
            else:
                task.status = 'Done'
                task.message = "Uploaded %s data file" % cnt
            task.finished = datetime.now()
    except Exception as e:
        print("%s Failed" % datetime.now(), file=self.logfile)
        if task:
            task.status = 'Failed'
            task.message = '%s' % e
            task.finished = datetime.now()
        raise e
    finally:
        if task:
            # NOTE(review): this unconditionally overwrites the
            # 'Done'/'Failed' status set above with '100%' - confirm whether
            # that is intended; the task.message still carries the outcome.
            task.status = '100%'
            task.save(using=self.database)
        if self.logfile:
            print('%s End of upload from folder\n' % datetime.now(), file=self.logfile)
            self.logfile.close()
def elementTreeToModels(elementTree, unitTestDB = "No"):
    """
    Walk an ElementTree of Crisis/Person/Organization records and build the
    corresponding (unsaved) model instances.

    Returns a 3-element list: [crisisModels, personModels, orgModels].
    The walk relies on the document ordering its sections as Crisis*,
    Person*, Organization*; a StopIteration from the underlying iterator
    signals the normal end of the document.

    NOTE: Python 2 code (print statements, unicode(), iterator .next()).
    """
    treeIter = elementTree.iter()
    models = []
    crisisModels = []
    personModels = []
    orgModels = []
    models.append(crisisModels)
    models.append(personModels)
    models.append(orgModels)
    nextElement = treeIter.next()  # Retrieves root element
    nextElement = treeIter.next()  # Retrieves next Crisis element
    try:
        # Parse crises.
        while (nextElement.tag == "Crisis"):
            crisisAttributes = getTextAndAttributes(nextElement)
            crisisID = crisisAttributes['ID']
            crisisName = crisisAttributes['Name']
            # accumulators for the optional sub-sections of this Crisis
            crisisPersonIDs = []
            crisisOrgIDs = []
            crisisKind = ""
            crisisDate = ""
            crisisTime = ""
            crisisLocations = []
            crisisHumanImpact = []
            crisisEconomicImpact = []
            crisisResourcesNeeded = []
            crisisWaysToHelp = []
            crisisCitations = []
            crisisExternalLinks = []
            crisisImages = []
            crisisVideos = []
            crisisMaps = []
            crisisFeeds = []
            crisisSummary = ""
            commonExists=False
            nextElement = treeIter.next()  # People element
            if (nextElement.tag == "People"):
                nextElement = treeIter.next()  # First Person in People sequence
                while (nextElement.tag == "Person"):
                    crisisPersonIDs.append(nextElement.attrib['ID'])
                    nextElement = treeIter.next()
            if (nextElement.tag == "Organizations"):
                nextElement = treeIter.next()
                while (nextElement.tag == "Org"):
                    crisisOrgIDs.append(nextElement.attrib['ID'])
                    nextElement = treeIter.next()
            if (nextElement.tag == "Kind"):
                crisisKind = nextElement.text
                nextElement = treeIter.next()
            if (nextElement.tag == "Date"):
                crisisDate = nextElement.text
                nextElement = treeIter.next()
            if (nextElement.tag == "Time"):
                crisisTime = nextElement.text
                nextElement = treeIter.next()
            if (nextElement.tag == "Locations"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    crisisLocations.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "HumanImpact"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    crisisHumanImpact.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "EconomicImpact"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    crisisEconomicImpact.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "ResourcesNeeded"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    crisisResourcesNeeded.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "WaysToHelp"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    crisisWaysToHelp.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "Common"):
                commonExists=True
                # getCommonData consumes the whole Common subtree and returns
                # the element that follows it
                nextElement, treeIter, d = getCommonData(nextElement, treeIter)
                crisisCitations = d.get('Citations')
                crisisExternalLinks = d.get('ExternalLinks')
                crisisImages = d.get('Images')
                crisisVideos = d.get('Videos')
                crisisMaps = d.get('Maps')
                crisisFeeds = d.get('Feeds')
                crisisSummary = d.get('Summary')
            #if isNotDuplicate(crisisID, "crisis", unitTestDB):
            # Common
            if (commonExists==False):
                common=None
                #common.save()
            else:
                # Build and persist the shared Common record and its lists
                common= Common()
                if(crisisSummary != "") :
                    common.summary= crisisSummary
                common.save()
                for c in crisisCitations:
                    li=List()
                    li.href=c.get("href")
                    li.embed=c.get("embed")
                    li.text=c.get("text")
                    li.content=c.get("content")
                    li.save()
                    common.citations.add(li)
                for c in crisisExternalLinks:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.externalLinks.add(li)
                for c in crisisImages:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.images.add(li)
                for c in crisisVideos:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.videos.add(li)
                for c in crisisMaps:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.maps.add(li)
                for c in crisisFeeds:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.feeds.add(li)
            # the list-valued fields are stored as their str() representation
            models[0].append(
                Crisis(
                    id = crisisID,
                    name = crisisName,
                    kind = crisisKind,
                    date = crisisDate,
                    time = crisisTime,
                    people = str(crisisPersonIDs),
                    organizations = str(crisisOrgIDs),
                    location = str(crisisLocations),
                    humanImpact = str(crisisHumanImpact),
                    economicImpact = str(crisisEconomicImpact),
                    resourcesNeeded = str(crisisResourcesNeeded),
                    waytoHelp = str(crisisWaysToHelp),
                    common = common,
                    slug = slugify(crisisID),
                )
            )
        # Parse people.
        while (nextElement.tag == "Person"):
            personAttributes = getTextAndAttributes(nextElement)
            personID = personAttributes['ID']
            personName = personAttributes['Name']
            personCrisisIDs = []
            personOrgIDs = []
            personKind = ""
            personLocation = ""
            personCitations = []
            personExternalLinks = []
            personImages = []
            personVideos = []
            personMaps = []
            personFeeds = []
            personSummary = ""
            commonExists = False
            nextElement = treeIter.next()
            if (nextElement.tag == "Crises"):
                nextElement = treeIter.next()  # First Crisis in Crises sequence
                while (nextElement.tag == "Crisis"):
                    personCrisisIDs.append(nextElement.attrib['ID'])
                    nextElement = treeIter.next()
            if (nextElement.tag == "Organizations"):
                nextElement = treeIter.next()  # First Org in Organizations sequence
                while (nextElement.tag == "Org"):
                    personOrgIDs.append(nextElement.attrib['ID'])
                    nextElement = treeIter.next()
            if (nextElement.tag == "Kind"):
                personKind = nextElement.text  # Kind text
                nextElement = treeIter.next()
            if (nextElement.tag == "Location"):
                personLocation = nextElement.text  # Location text
                nextElement = treeIter.next()
            if (nextElement.tag == "Common"):
                commonExists = True
                nextElement, treeIter, d = getCommonData(nextElement, treeIter)
                personCitations = d.get('Citations')
                personExternalLinks = d.get('ExternalLinks')
                personImages = d.get('Images')
                personVideos = d.get('Videos')
                personMaps = d.get('Maps')
                personFeeds = d.get('Feeds')
                personSummary = d.get('Summary')
            #if isNotDuplicate(personID, "person", unitTestDB):
            if (commonExists == False):
                common=None
            else:
                common= Common()
                if(personSummary != "") :
                    common.summary= personSummary
                common.save()
                for c in personCitations:
                    li=List()
                    li.href=c.get("href")
                    li.embed=c.get("embed")
                    li.text=c.get("text")
                    li.content=c.get("content")
                    li.save()
                    common.citations.add(li)
                for c in personExternalLinks:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.externalLinks.add(li)
                for c in personImages:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.images.add(li)
                for c in personVideos:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.videos.add(li)
                for c in personMaps:
                    # map text may contain non-ASCII characters; normalize to
                    # an ASCII-safe form before saving
                    if c.get("text") != None :
                        c["text"] = unicodedata.normalize('NFKD', unicode(c.get("text"))).encode('ascii', 'ignore')
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    print "personID = ", personID
                    print "li.text = ", li.text
                    li.save()
                    common.maps.add(li)
                for c in personFeeds:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.feeds.add(li)
            models[1].append(
                Person(
                    id = personID,
                    name = personName,
                    kind = personKind,
                    location = personLocation,
                    crises=str(personCrisisIDs),
                    organizations=str(personOrgIDs),
                    common = common,
                    slug = slugify(personID),
                )
            )
        # Parse organizations.
        while (nextElement.tag == "Organization"):
            orgAttributes = getTextAndAttributes(nextElement)
            orgID = orgAttributes['ID']
            orgName = orgAttributes['Name']
            orgCrisisIDs = []
            orgPeopleIDs = []
            kind = ""
            location = ""
            history = []
            contactInfo = []
            orgCitations = []
            orgExternalLinks = []
            orgImages = []
            orgVideos = []
            orgMaps = []
            orgFeeds = []
            orgSummary = ""
            commonExists = False
            nextElement = treeIter.next()
            if (nextElement.tag == "Crises"):
                nextElement = treeIter.next()
                while (nextElement.tag == "Crisis"):
                    orgCrisisIDs.append(nextElement.attrib['ID'])
                    nextElement = treeIter.next()
            if (nextElement.tag == "People"):
                nextElement = treeIter.next()
                while (nextElement.tag == "Person"):
                    orgPeopleIDs.append(nextElement.attrib['ID'])
                    nextElement = treeIter.next()
            if (nextElement.tag == "Kind"):
                kind = nextElement.text
                nextElement = treeIter.next()
            if (nextElement.tag == "Location"):
                location = nextElement.text
                nextElement = treeIter.next()
            if (nextElement.tag == "History"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    history.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "ContactInfo"):
                nextElement = treeIter.next()
                while (nextElement.tag == "li"):
                    contactInfo.append(nextElement.text)
                    nextElement = treeIter.next()
            if (nextElement.tag == "Common"):
                commonExists = True
                #print "******* nextElement = ", nextElement
                nextElement, treeIter, d = getCommonData(nextElement, treeIter)
                orgCitations = d.get('Citations')
                orgExternalLinks = d.get('ExternalLinks')
                orgImages = d.get('Images')
                orgVideos = d.get('Videos')
                orgMaps = d.get('Maps')
                orgFeeds = d.get('Feeds')
                orgSummary = d.get('Summary')
            if (commonExists == False):
                common=None
            else:
                common= Common()
                if(orgSummary != "") :
                    common.summary= orgSummary
                common.save()
                for c in orgCitations:
                    li=List()
                    li.href=c.get("href")
                    li.embed=c.get("embed")
                    li.text=c.get("text")
                    li.content=c.get("content")
                    li.save()
                    common.citations.add(li)
                for c in orgExternalLinks:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.externalLinks.add(li)
                for c in orgImages:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.images.add(li)
                for c in orgVideos:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.videos.add(li)
                for c in orgMaps:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.maps.add(li)
                for c in orgFeeds:
                    li=List( href=c.get("href"), embed=c.get("embed"), text=c.get("text"), content=c.get("content") )
                    li.save()
                    common.feeds.add(li)
            models[2].append(
                Organization(
                    id = orgID,
                    name = orgName,
                    kind = kind,
                    location = location,
                    history = history,
                    contact = contactInfo,
                    crises=str(orgCrisisIDs),
                    people=str(orgPeopleIDs),
                    common = common,
                    slug = slugify(orgID),
                )
            )
        # assumes this final advance raises StopIteration at the end of a
        # well-formed document - TODO confirm placement against the source repo
        nextElement = treeIter.next()
        # Control should normally reach here and return from the function.
    except StopIteration as e:
        #print "\nReached end of file correctly!"
        #print models
        return models
        """ except IntegrityError, e: print "hello" continue """
    # Control should never normally reach here.
    raise IOError("Invalid file!")
def handle(self, *attrs, **options):
    """
    Create random instances for the models given on the command line.

    Positional arguments look like ``app_label.ModelName:count``; the
    options control how FK and M2M relations are followed or generated,
    either globally ("min:max") or per field ("field:min:max,...").
    """
    bad_option = None

    # --no-follow-fk: when present, disable re-use of related objects.
    follow_fk = None if options['no_follow_fk'] is None else False

    # --no-follow-m2m wins over --follow-m2m; only when it is absent do we
    # parse the follow-m2m specification.
    if options['no_follow_m2m'] is not None:
        follow_m2m = False
    else:
        follow_m2m = None
        raw_follow = options['follow_m2m']
        if raw_follow:
            try:
                pieces = raw_follow.split(',')
                if len(pieces) == 1 and pieces[0].count(':') == 1:
                    # a lone "min:max" pair applies to every m2m field
                    follow_m2m = [int(part) for part in pieces[0].split(':')]
                else:
                    follow_m2m = {}
                    for spec in pieces:
                        name, lo, hi = spec.split(':')
                        follow_m2m[name] = int(lo), int(hi)
            except ValueError:
                bad_option = '--follow-m2m={0}'.format(raw_follow)

    # --generate-fk: comma-separated field list, or None.
    raw_generate_fk = options['generate_fk']
    generate_fk = None if raw_generate_fk is None else raw_generate_fk.split(',')

    # --generate-m2m: same grammar as --follow-m2m, empty segments ignored.
    generate_m2m = None
    raw_generate = options['generate_m2m']
    if raw_generate:
        try:
            pieces = [piece for piece in raw_generate.split(',') if piece]
            if len(pieces) == 1 and pieces[0].count(':') == 1:
                generate_m2m = [int(part) for part in pieces[0].split(':')]
            else:
                generate_m2m = {}
                for spec in pieces:
                    name, lo, hi = spec.split(':')
                    generate_m2m[name] = int(lo), int(hi)
        except ValueError:
            bad_option = '--generate-m2m={0}'.format(raw_generate)

    if bad_option:
        raise CommandError(
            u'Invalid option {0}\n'
            u'Expected: {1}=field:min:max,field2:min:max... '
            u'(min and max must be numbers)'.format(
                bad_option, bad_option.split('=', 1)[0]))

    # --use: dotted path to the AutoFixture subclass to instantiate.
    use = options['use']
    if use:
        dotted = use.split('.')
        use = getattr(importlib.import_module('.'.join(dotted[:-1])), dotted[-1])

    overwrite_defaults = options['overwrite_defaults']
    self.verbosity = int(options['verbosity'])

    # Parse the positional "app_label.ModelName:count" arguments.
    models = []
    for attr in attrs:
        try:
            app_label, model_label = attr.split('.')
            model_label, count = model_label.split(':')
            count = int(count)
        except ValueError:
            raise CommandError(
                u'Invalid argument: {0}\n'
                u'Expected: app_label.ModelName:count '
                u'(count must be a number)'.format(attr))
        model = get_model(app_label, model_label)
        if not model:
            raise CommandError(
                u'Unknown model: {0}.{1}'.format(app_label, model_label))
        models.append((model, count))

    # Echo each created instance, then let registered fixtures be discovered.
    signals.instance_created.connect(self.print_instance)
    autofixture.autodiscover()

    kwargs = {
        'overwrite_defaults': overwrite_defaults,
        'follow_fk': follow_fk,
        'generate_fk': generate_fk,
        'follow_m2m': follow_m2m,
        'generate_m2m': generate_m2m,
    }
    for model, count in models:
        if use:
            fixture = use(model, **kwargs)
            fixture.create(count)
        else:
            autofixture.create(model, count, **kwargs)