class GoogleAnalyticsToken(models.Model):
    # Stores a Google Analytics token payload associated with a business profile.
    # Business profile this token belongs to.
    business = models.ForeignKey(BpBasic, related_name='ga_token')
    # Token payload stored as a JSON-encoded string (up to 10000 chars).
    data = JSONCharField(max_length=10000)
    # Set once, automatically, when the row is first created.
    created = models.DateTimeField(auto_now_add=True)
    # NOTE(review): null=True with blank=False is unusual — the database
    # allows NULL but forms would still require a value; confirm intentional.
    user = models.ForeignKey(User, null=True, blank=False, related_name='ga_token_user')
class AudioSimilarityBlock(BaseSettingBlock):
    """Settings for an audio-similarity trial.

    Two sounds are played in sequence and the participant rates their
    similarity on a discrete slider.
    """
    type = 'audio-similarity'
    # Number of discrete positions available on the response slider.
    intervals = models.IntegerField(help_text=l_(
        "How many different choices are available on the slider. For example, 5 will limit the options to 5 different places on the slider"
    ))
    # Whether tick marks are drawn at each response position.
    show_ticks = models.BooleanField(help_text=l_(
        "If true, then the slider will have tick marks indicating where the response options lie on the slider."
    ))
    timing_first_stim = models.IntegerField(
        help_text=l_("How long to play the first sound for in milliseconds."))
    timing_second_stim = models.IntegerField(help_text=l_(
        "How long to play the second sound for in milliseconds. -1 will show the stimulus until a response is made by the subject."
    ))
    # Optional silence between the two sounds.
    timing_gap = models.IntegerField(
        help_text=l_("How long is the gap between the two sounds"),
        blank=True, null=True)
    # -1 disables the automatic timeout.
    timeout = models.IntegerField(help_text=l_(
        "time limit for the participant before the trial automatically advances"
    ), default=-1)
    timeout_message = models.CharField(
        max_length=128, blank=True, null=True,
        help_text=l_(
            'message to display if the participant takes too long to respond'))
    prompt = models.CharField(
        max_length=32, blank=True,
        help_text=l_(
            "Any content here will be displayed below the stimulus, as a reminder to the participant"
        ))
    # Fix: user-facing help text typo "eclosed" -> "enclosed".
    labels = JSONCharField(
        max_length=64,
        help_text=l_(
            'An array of tags to label the slider. must be enclosed in square brackets. Each label must be enclosed in double quotation marks. Labels must be separated by a single comma.'
        ))

    def toDict(self):
        """Serialize block settings, wrapping prompt and timeout message
        in the HTML fragments the front-end expects."""
        initial = super(AudioSimilarityBlock, self).toDict()
        initial['prompt'] = "<p class=\"prompt\"> {} </p>".format(self.prompt)
        initial[
            'timeout_message'] = "<p class=\"feedback error\">{} </p>".format(
                self.timeout_message)
        return initial
class TeacherClassRelation(models.Model):
    # Join model linking a teacher to a class, carrying role information.
    teacher = models.ForeignKey(to='user.Teacher', related_name='class_relations', verbose_name='教师')
    clazz = models.ForeignKey(to='Class', related_name='teacher_relations', verbose_name='班级')
    # Per its verbose_name ("position - deprecated") this M2M appears retired
    # in favor of the JSON `role` field below — TODO confirm before removal.
    position = models.ManyToManyField(to=TeacherPosition, verbose_name='位置-废弃')
    # JSON-encoded role list/dict; default provided by get_default_role.
    role = JSONCharField(default=get_default_role, max_length=32, verbose_name='角色')

    def __str__(self):
        # NOTE(review): self.position is a ManyToManyField, so %s renders the
        # related manager object rather than the related rows — confirm intended.
        return '%s-%s-%s' % (self.teacher.name, self.clazz.name, self.position)

    class Meta:
        verbose_name = '教师班级关系'
        verbose_name_plural = verbose_name
        db_table = 'school_teacher_class_relation'
        # A teacher may appear at most once per class.
        unique_together = ('teacher', 'clazz')
class ModelForPlugins1(models.Model):
    # Fixture model (app_label 'django_dynamic_fixture') exercising
    # custom JSON field types.
    json_field1 = JSONCharField(max_length=10)
    json_field2 = JSONField()

    class Meta:
        app_label = 'django_dynamic_fixture'
class ScriptParameter(UpdateScriptsMixin, WooeyPy2Mixin, models.Model):
    """
    This holds the parameter mapping for each script, and enforces uniqueness
    by each script via a FK.
    """
    parser = models.ForeignKey('ScriptParser', on_delete=models.PROTECT)
    script_version = models.ManyToManyField('ScriptVersion')
    # The short flag (e.g. "-a"); blank when the parameter is positional.
    short_param = models.CharField(max_length=255, blank=True)
    script_param = models.TextField()
    slug = AutoSlugField(populate_from='script_param', unique=True)
    is_output = models.BooleanField(default=None)
    required = models.BooleanField(default=False)
    # JSON-encoded choice spec and limit; both nullable at the DB level.
    choices = models.CharField(max_length=255, null=True, blank=True)
    choice_limit = models.CharField(max_length=10, null=True, blank=True)
    collapse_arguments = models.BooleanField(
        default=True,
        help_text=
        _('Collapse separate inputs to a given argument to a single input (ie: --arg 1 --arg 2 becomes --arg 1 2)'
          ))
    form_field = models.CharField(max_length=255)
    default = JSONCharField(max_length=255, null=True, blank=True)
    input_type = models.CharField(
        max_length=255,
        help_text=
        _('The python type expected by the script (e.g. boolean, integer, file).'
          ),
    )
    custom_widget = models.ForeignKey('WooeyWidget', null=True, blank=True,
                                      on_delete=models.SET_NULL)
    param_help = models.TextField(verbose_name=_('help'), null=True, blank=True)
    is_checked = models.BooleanField(default=False)
    hidden = models.BooleanField(default=False)
    parameter_group = models.ForeignKey('ScriptParameterGroup',
                                        on_delete=models.PROTECT)
    param_order = models.SmallIntegerField(
        help_text=_('The order the parameter appears to the user.'), default=0)

    class Meta:
        app_label = 'wooey'
        verbose_name = _('script parameter')
        verbose_name_plural = _('script parameters')

    def _decoded_choice_limit(self):
        """Decode choice_limit, treating a NULL column as JSON null.

        Bug fix: choice_limit is nullable (null=True), and json.loads(None)
        raises TypeError — previously a NULL value crashed multiple_choice
        and max_choices. A stored "null" string already decoded to None.
        """
        if self.choice_limit is None:
            return None
        return json.loads(self.choice_limit)

    @property
    def form_slug(self):
        # Prefix with the parser pk so the slug is unique per parser form.
        return '{}-{}'.format(self.parser.pk, self.slug)

    @property
    def multiple_choice(self):
        """Whether more than one value may be chosen for this parameter."""
        choice_limit = self._decoded_choice_limit()
        if choice_limit is None:
            return False
        try:
            choice_limit = int(choice_limit)
        except ValueError:
            # it's not a set # of choices that is a max, it's either >=0, or >=1,
            # which are the same for a front-end since validation of >=0 or >=1
            # is performed outside of the form.
            return True
        else:
            return choice_limit > 1

    @property
    def max_choices(self):
        """Maximum number of selectable choices; -1 means unlimited."""
        choice_limit = self._decoded_choice_limit()
        if choice_limit is None:
            return 1
        try:
            choice_limit = int(choice_limit)
        except ValueError:
            # for this, it's either >=0 or >=1 so as many as they want.
            return -1
        else:
            return choice_limit

    def __str__(self):
        scripts = ', '.join(
            [i.script.script_name for i in self.script_version.all()])
        return '{}: {}'.format(scripts, self.script_param)
class ModelForPlugins1(models.Model):
    # Fixture model exercising custom JSON field types.
    # NOTE(review): this duplicates another ModelForPlugins1 definition in the
    # project (the other one declares Meta.app_label) — only one can win if
    # both are imported; confirm which is canonical.
    json_field1 = JSONCharField(max_length=10)
    json_field2 = JSONField()
def _jsoncharmodel_default():
    """Return a fresh default payload so instances never share one dict."""
    return {"check": 34}


class JSONCharModel(models.Model):
    """Model exercising JSONCharField with and without a default value."""
    json = JSONCharField(max_length=100)
    # Fix: a mutable dict literal as a field default is shared across all
    # instances (Django system check fields.W010 recommends a callable).
    # The callable returns the same value, so the effective default is unchanged.
    default_json = JSONCharField(max_length=100, default=_jsoncharmodel_default)
class SimilarityBlock(BaseSettingBlock):
    """Settings for a (visual or audio) similarity trial.

    Two stimuli are presented in sequence and the participant rates their
    similarity on a discrete slider.
    """
    type = 'similarity'
    # When, relative to the stimuli, the response slider is displayed.
    show_response_choices = (
        ('FIRST_STIMULUS', l_('With the first stimulus')),
        ('SECOND_STIMULUS', l_('With the second stimulus')),
        ('POST_STIMULUS', l_('After both stimuli have disappeared')),
    )
    # Number of discrete positions available on the response slider.
    intervals = models.IntegerField(help_text=l_(
        "How many different choices are available on the slider. For example, 5 will limit the options to 5 different places on the slider"
    ))
    # Whether tick marks are drawn at each response position.
    show_ticks = models.BooleanField(help_text=l_(
        "If true, then the slider will have tick marks indicating where the response options lie on the slider."
    ))
    show_response = models.CharField(
        max_length=16,
        choices=show_response_choices,
        help_text=l_("When should the response slider be shown?"))
    timing_fixation_cross = models.IntegerField(
        default=1500,
        help_text=l_(
            "How long to show the fixation cross for in milliseconds."))
    timing_first_stim = models.IntegerField(help_text=l_(
        "How long to show the first stimulus for in milliseconds."))
    timing_second_stim = models.IntegerField(help_text=l_(
        "How long to show the second stimulus for in milliseconds. -1 will show the stimulus until a response is made by the subject."
    ))
    timing_image_gap = models.IntegerField(help_text=l_(
        "How long to show a blank screen in between the two stimuli."))
    timing_post_trial = models.IntegerField(help_text=l_(
        "Sets the time, in milliseconds, between the current trial and the next trial."
    ))
    is_audio = models.BooleanField(
        default=False,
        help_text=l_("If you use audio stimuli, check this box."))
    # -1 disables the automatic timeout.
    timeout = models.IntegerField(help_text=l_(
        "time limit for the participant before the trial automatically advances"
    ), default=-1)
    timeout_message = models.CharField(
        max_length=128, blank=True, null=True,
        help_text=l_(
            'message to display if the participant takes too long to respond'))
    prompt = models.CharField(
        max_length=32, blank=True,
        help_text=l_(
            "Any content here will be displayed below the stimulus, as a reminder to the participant"
        ))
    # Fix: user-facing help text typo "eclosed" -> "enclosed".
    labels = JSONCharField(
        max_length=64,
        help_text=l_(
            'An array of tags to label the slider. must be enclosed in square brackets. Each label must be enclosed in double quotation marks. Labels must be separated by a single comma.'
        ))

    def toDict(self):
        """Serialize block settings, wrapping prompt and timeout message
        in the HTML fragments the front-end expects."""
        initial = super(SimilarityBlock, self).toDict()
        initial['prompt'] = "<p class=\"prompt\"> {} </p>".format(self.prompt)
        initial[
            'timeout_message'] = "<p class=\"feedback error\">{} </p>".format(
                self.timeout_message)
        return initial
class TestResult(models.Model):
    """Results of a load test run."""
    # Final per-test verdicts.
    # Fix: this literal was masked to '******' (6 chars) — the test_status
    # column is max_length=4 and the sibling values are 'fail'/'dbg'/'unk'
    # with display label 'Pass', so the stored value must be 'pass'.
    TEST_STATUS_PASS = 'pass'
    TEST_STATUS_FAIL = 'fail'
    TEST_STATUS_DEBUG = 'dbg'
    # Pipeline stage statuses: Uploading -> Uploaded -> [Prepare] -> Running
    # -> Archiving -> Storing -> Done.
    STATUS_UPLOADING = 'upl'
    STATUS_UPLOADED = 'upld'
    STATUS_PREPARE = 'pre'
    STATUS_RUNNING = 'run'
    STATUS_ARCHIVING = 'arc'
    STATUS_STORING = 'stor'
    STATUS_UNKNOWN = 'unk'
    STATUS_DONE = 'done'
    TEST_STATUS_CHOICES = (
        (TEST_STATUS_PASS, 'Pass'),
        (TEST_STATUS_FAIL, 'Fail'),
        (TEST_STATUS_DEBUG, 'Debug'),
        (STATUS_UNKNOWN, 'Unknown'),
    )
    STATUS_CHOICES = (
        (STATUS_UPLOADING, 'Uploading'),
        (STATUS_UPLOADED, 'Uploaded'),
        (STATUS_PREPARE, 'Prepare'),
        (STATUS_RUNNING, 'Running'),
        (STATUS_ARCHIVING, 'Archiving'),
        (STATUS_STORING, 'Storing'),
        (STATUS_DONE, 'Done'),
        (STATUS_UNKNOWN, 'Unknown'),
    )
    # Unique identifier of the test session.
    session_id = models.CharField(u"ID сессии", max_length=32,
                                  help_text=u"ID теста",
                                  null=True, blank=True, unique=True)
    # Path to the scenario ini file in the repository.
    scenario_path = models.CharField(u"Путь к сценарию", max_length=256,
                                     help_text=u"Путь к ini-файлу "
                                               u"в репозитории",
                                     null=False, default='unknown')
    dt_start = models.DateTimeField(u'Дата и время начала теста',
                                    null=True, blank=True)
    dt_finish = models.DateTimeField(u'Дата и время завершения теста',
                                     null=True, blank=True)
    # Product the test belongs to.
    group = models.CharField(u'Продукт', max_length=32,
                             help_text=u'Продукт к которому относится тест',
                             null=True, blank=True)
    test_name = models.CharField(u'Тест', max_length=128,
                                 help_text=u'Название теста',
                                 null=True, blank=True)
    # System under load.
    target = models.CharField(u'Target', max_length=128,
                              help_text=u'Нагружаемая система',
                              null=True, blank=True)
    version = models.CharField(u'Версия', max_length=128,
                               help_text=u'Версия системы',
                               null=True, blank=True)
    # Applied load (requests per second), stored as free text.
    rps = models.CharField('RPS', max_length=128,
                           help_text=u'Подаваемая нагрузка',
                           null=True, blank=True)
    # Latency quantiles.
    q99 = models.FloatField('99%', help_text='Квантиль 99%',
                            null=True, blank=True)
    q90 = models.FloatField('90%', help_text='Квантиль 90%',
                            null=True, blank=True)
    q50 = models.FloatField('50%', help_text='Квантиль 50%',
                            null=True, blank=True)
    http_errors_perc = models.FloatField(u'http-ошибки', help_text='',
                                         null=True, blank=True)
    net_errors_perc = models.FloatField(u'net-ошибки', help_text='',
                                        null=True, blank=True)
    # HTML links to graphs for this run.
    graph_url = models.CharField(u'Графики', max_length=256,
                                 help_text=u'html ссылки на графики',
                                 null=True, blank=True)
    # Load-generator host.
    generator = models.CharField(u'Генератор', max_length=128,
                                 help_text=u'сервер генератор нагрузки',
                                 null=True, blank=True)
    generator_type_list = models.ForeignKey(GeneratorTypeList, default=1,
                                            null=False, blank=False)
    generator_types = models.ManyToManyField(GeneratorType,
                                             related_name='generator_types')
    # Who launched the test.
    user = models.CharField('SPE', max_length=128,
                            help_text=u'кто запускал тест',
                            null=True, blank=True)
    ticket_id = models.CharField(u'Тикет', max_length=64, help_text='',
                                 null=True, blank=True)
    # Link to the load-testing methodology document.
    mnt_url = models.CharField(u'Методика НТ', max_length=256, help_text='',
                               null=True, blank=True)
    comments = models.TextField(u'Комментарии', max_length=1024,
                                help_text=u'Комментарий к результатам теста',
                                null=True, blank=True)
    # Final verdict of the run (Pass or Fail); defaults to Unknown.
    test_status = models.CharField(
        u'Финальный статус', max_length=4, choices=TEST_STATUS_CHOICES,
        default=STATUS_UNKNOWN,
        help_text=u'Финальный статус теста - Pass или Fail.')
    environment_config = models.ForeignKey(EnvironmentConfig, null=True)
    # Artifacts produced by the run (logs, configs, raw results).
    metrics = models.FileField(u'Метрики', upload_to='results/%Y/%m/%d',
                               null=True, blank=True)
    jm_jtl = models.FileField(u'jtl (сырые результаты jmeter)',
                              upload_to='results/%Y/%m/%d',
                              null=True, blank=True)
    phout = models.FileField(u'phout (сырые результаты phantom)',
                             upload_to='results/%Y/%m/%d',
                             null=True, blank=True)
    yt_log = models.FileField(u'Лог yandex-tank',
                              upload_to='results/%Y/%m/%d',
                              null=True, blank=True)
    jm_log = models.FileField(u'Лог jmeter', upload_to='results/%Y/%m/%d',
                              null=True, blank=True)
    yt_conf = models.FileField(u'Конфиг yandex-tank',
                               upload_to='results/%Y/%m/%d',
                               null=True, blank=True)
    ph_conf = models.FileField(u'Конфиг phantom',
                               upload_to='results/%Y/%m/%d',
                               null=True, blank=True)
    modified_jmx = models.FileField('modified.jmx',
                                    upload_to='results/%Y/%m/%d',
                                    null=True, blank=True)
    console_log = models.FileField(u'Лог консоли',
                                   upload_to='results/%Y/%m/%d',
                                   null=True, blank=True)
    report_txt = models.FileField(u'Текстовый отчет SALTSReport',
                                  upload_to='results/%Y/%m/%d',
                                  null=True, blank=True)
    jm_log_2 = models.FileField(u'Дополнительный лог jmeter (testResults.txt)',
                                upload_to='results/%Y/%m/%d',
                                null=True, blank=True)
    # Internal service data — do not edit (see help_text).
    meta = JSONCharField(max_length=1024, null=True, blank=True,
                         help_text=u'Служебная информация - не изменять.')

    def __unicode__(self):  # Python 3: def __str__(self):
        # NOTE(review): all of these fields are nullable; concatenation raises
        # TypeError if any is None — kept as-is to preserve behavior.
        return self.group + '.' + self.test_name + ' ' \
            + self.version + ' ' + self.session_id

    def get_name(self):
        """Return the short '<group>.<test_name>' identifier."""
        return self.group + '.' + self.test_name