Example #1
0
def PostHits_loop():
    """Post one external-question HIT per HSet number in [startHSet, endHSet].

    Relies on module-level configuration (ACCESS_ID, SECRET_KEY, HOST,
    startHSet, endHSet, frameHeight, the qualification settings, pay/timing
    values, and PREVIEW) being loaded before this is called, e.g. via
    ``from alvarezlab import *``.

    Side effects: creates one HIT per HSet on MTurk and prints a preview
    URL for each.  Returns None.
    """
    # To post a HIT, first connect to Turk using our access codes:
    # The ACCESS_ID and SECRET_KEY are loaded before this function is called
    # (e.g., from alvarezlab import * );
    mtc = MTurkConnection(aws_access_key_id=ACCESS_ID,
                          aws_secret_access_key=SECRET_KEY,
                          host=HOST)

    # endHSet + 1 so the loop is inclusive of endHSet.
    for x in range(startHSet, endHSet + 1):
        HSet = x
        # Each HIT points workers at the experiment page for its HSet.
        urlForHIT = "https://scorsese.wjh.harvard.edu/turk/experiments/cfm/Search10/index_cmtrial_Search10.html?HSetNum=%d" % HSet

        # Now lets setup a structure for our external HIT. We need the URL we want to be
        # shown within the Turk window and also how tall we want the Turk iframe to be:
        q = ExternalQuestion(external_url=urlForHIT, frame_height=frameHeight)

        # And any qualifications we want people to have:
        qualifications = mtqu.Qualifications()

        qualifications.add(
            mtqu.PercentAssignmentsApprovedRequirement(
                'GreaterThanOrEqualTo', percentAssignmentsApprovedRequirement))
        qualifications.add(mtqu.LocaleRequirement("EqualTo",
                                                  localeRequirement))
        # "NONE" is the sentinel meaning "no custom qualification required".
        if (qualificationID != "NONE"):
            qualifications.add(
                mtqu.Requirement(qualificationID, "EqualTo", 1,
                                 notifyWorkerOfQualification))

        # Post:
        theHIT = mtc.create_hit(
            question=q,
            lifetime=minToSec(minutesBeforeHitExpires),
            max_assignments=numAssignmentsToPost,
            title=titleForWorkers,
            description=descriptionForWorkers,
            keywords=keywordsForWorkers,
            qualifications=qualifications,
            reward=pay,
            duration=minToSec(minutesForUsersToFinish),
            approval_delay=minToSec(minutesBeforeAutoApproved),
            annotation=projectNameForRequesters)

        # get more info about the hit (needed for the HITGroupId used below)
        hit = mtc.get_hit(theHIT[0].HITId)

        # Print out the HIT's ID if all went well:
        # pprint(vars(hit[0]))
        # print "Experiment info:"
        # print HOST
        print "preview hit in HSet ", HSet, ": "
        # print urlForHIT, "\n"
        # print "HITId"
        # print theHIT[0].HITId, "\n"
        print PREVIEW + "?groupId=" + hit[0].HITGroupId, "\n"
Example #2
0
File: turk.py  Project: dpfried/phrasenode
def standard_quals(debug):
    """Build the MTurk qualification set for this task.

    Args:
        debug (bool): when True, restrict the HIT to a private qualification
            that only our own accounts hold (for internal testing).

    Returns:
        mtqu.Qualifications: the assembled worker requirements.
    """
    requirements = mtqu.Qualifications()
    if debug:
        # Internal-testing gate: only accounts holding this custom
        # qualification with value 100 may even preview the HIT.
        requirements.add(
            mtqu.Requirement("3B9TX86P8NTZUJU62N2SLJ1DZCERL5", "EqualTo", 100,
                             required_to_preview=True))
        return requirements
    # Production gates: US workers with a solid approval record.
    requirements.add(
        mtqu.LocaleRequirement("EqualTo", "US", required_to_preview=True))
    requirements.add(
        mtqu.PercentAssignmentsApprovedRequirement("GreaterThan", 95))
    requirements.add(mtqu.NumberHitsApprovedRequirement("GreaterThan", 10))
    return requirements
Example #3
0
    def test_boto2_import(self):
        """Verify that `boto.mturk` resolves to the otree boto2 shim."""
        from boto.mturk import qualification

        requirements = [
            qualification.LocaleRequirement("EqualTo", "US"),
            qualification.PercentAssignmentsApprovedRequirement(
                "GreaterThanOrEqualTo", 50),
            qualification.NumberHitsApprovedRequirement(
                "GreaterThanOrEqualTo", 5),
        ]

        # The shim's constructors all return None; the real boto2 package
        # (which may still be installed) would return requirement objects,
        # so equality with None proves the shim was imported.
        for requirement in requirements:
            self.assertEqual(requirement, None)
Example #4
0
def PostHits():
    """Post the Cont_LTM_90 color-memory HIT to MTurk.

    Uses module-level ACCESS_ID / SECRET_KEY / HOST for the connection.
    Posts a single-assignment external-question HIT restricted to US
    Masters workers with no prior participation, then prints the result.
    """
    mtc = MTurkConnection(aws_access_key_id=ACCESS_ID,
                          aws_secret_access_key=SECRET_KEY,
                          host=HOST)

    q = ExternalQuestion(
        external_url="https://paulscotti.github.io/mturk/ContLTMBlocked90",
        frame_height=675)
    keywords = [
        'memory', 'psychology', 'game', 'attention', 'experiment', 'research'
    ]
    title = 'Memorize the colors of objects! (Psychology Experiment, 1.5 hours)'
    experimentName = 'Cont_LTM_90'
    description = 'Research study involving color memory.'
    pay = 9.00  # reward in USD per assignment

    qualifications = mtqu.Qualifications()
    qualifications.add(
        mtqu.PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 98))
    qualifications.add(
        mtqu.NumberHitsApprovedRequirement('GreaterThanOrEqualTo', 1000))
    qualifications.add(mtqu.LocaleRequirement("EqualTo", "US"))
    qualifications.add(
        mtqu.Requirement(
            '38XLDN1M8DBWG1FPHU43ZCVTZ4T3DT', 'DoesNotExist', '',
            'DiscoverPreviewAndAccept'))  # No prior workers of ours
    qualifications.add(
        mtqu.Requirement('2F1QJWKUDD8XADTFD2Q0G6UTO95ALH', 'Exists', '',
                         'DiscoverPreviewAndAccept'))  # Masters only

    theHIT = mtc.create_hit(
        question=q,
        lifetime=2 * 60 * 60,  # 2 hours
        max_assignments=1,  #needs to be less than 10 else additional fees
        title=title,
        description=description,
        keywords=keywords,
        qualifications=qualifications,
        reward=pay,
        duration=180 *
        60,  #3 hours (HIT won't be accepted if they go over time)
        approval_delay=1 * 60 * 60,  # 1 hours
        annotation=experimentName)

    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust for validating that the post succeeded.
    assert (theHIT.status == True)
    print theHIT
    print theHIT[0].HITId
Example #5
0
def post_hits(hit_info, n_sub_hits):
    """Post the HIT described by ``hit_info`` and record the submission.

    Creates an external-question HIT with ``n_sub_hits`` assignments,
    writes the resulting HIT id/url back into ``hit_info``, and appends
    ``hit_info`` to a local ``.npy`` record file keyed by submission index.

    Args:
        hit_info (dict): HIT configuration (url, payment, timing, etc.).
        n_sub_hits (int): number of assignments to request.
    """
    connection = MTurkConnection(aws_access_key_id=access_id,
                                 aws_secret_access_key=secret_key,
                                 host=host)
    question = ExternalQuestion(external_url=hit_info['external_url'],
                                frame_height=hit_info['frame_height'])

    worker_reqs = mtqu.Qualifications()
    worker_reqs.add(
        mtqu.PercentAssignmentsApprovedRequirement(
            'GreaterThanOrEqualTo', hit_info['approval_rating_cutoff']))
    worker_reqs.add(mtqu.LocaleRequirement("EqualTo", "US"))

    print('url:', hit_info['external_url'], n_sub_hits)

    the_HIT = connection.create_hit(
        question=question,
        lifetime=hit_info['lifetime_of_experiment'],
        max_assignments=n_sub_hits,
        title=hit_info['title'],
        description=hit_info['description'],
        keywords=hit_info['keywords'],
        qualifications=worker_reqs,
        reward=hit_info['payment_for_experiment'],
        duration=hit_info['duration_of_experiment'],
        approval_delay=hit_info['approval_delay'],
        annotation=hit_info['experiment_name'])

    assert (the_HIT.status == True)

    hit_info['hit_id'] = the_HIT[0].HITId
    hit_url = "{}{}".format(base_url, the_HIT[0].HITTypeId)
    hit_info['hit_url'] = hit_url

    record_name = 'HIT_submission_records_%s.npy' % (context)

    # Load prior submissions if the record file already exists.
    if record_name in os.listdir(os.getcwd()):
        turk_info = np.load(record_name).item()
    else:
        turk_info = {}

    key_name = 'submission_%d' % len(turk_info)
    turk_info[key_name] = hit_info
    np.save(record_name, turk_info)

    print('HIT_ID:', the_HIT[0].HITId, 'key_name', key_name,
          "\nwhich you can see here:", hit_url)
def PostHits(pay, lifetime, max_assignments, exp):
  """Post a decision-making HIT for experiment ``exp`` and return its HIT id.

  Args:
    pay: reward in USD per assignment.
    lifetime: seconds the HIT remains available on MTurk.
    max_assignments: number of workers who may complete the HIT.
    exp: experiment directory name appended to the external URL.

  Returns:
    The posted HIT's id (also printed to stdout).
  """
  mtc = MTurkConnection(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,host=HOST)


  q = ExternalQuestion(external_url = "https://davebraun.org/dissertation/experiments/production/" + exp +"/", frame_height=675)
  keywords = ['attention', 'psychology', 'experiment', 'research']
  title = 'A Decision Making Experiment'
  experimentName = 'Decision Making Experiment' ## this is NOT what it ends up getting called on my server
  description = 'This HIT will take about 30 mins to complete. All HITS in this batch are the same, and you will only be able to perform one of the HITS in this batch.'

  qualifications = mtqu.Qualifications()
  qualifications.add(mtqu.PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 90))
  qualifications.add(mtqu.LocaleRequirement("EqualTo", "US"))
  #qualifications.add(mtqu.Requirement("2Z046OQ1SNQQREGXAFSQPCNR1605PN"))

  theHIT = mtc.create_hit(question=q,
                          lifetime=lifetime,
                          max_assignments=max_assignments,
                          title=title,
                          description=description,
                          keywords=keywords,
                          qualifications=qualifications,
                          reward=pay,
                          duration=120 * 60, # 120 minutes
                          approval_delay=2* 24 * 60 * 60, # the number of seconds after an assignment is submitted is it automatically approved unless explicitly rejected
                          ## the norm is to try to keep this under 7 days, many requesters approve in less than 3 days
                          annotation=experimentName)

  # NOTE(review): assert is stripped under `python -O`; a raise would be safer.
  assert(theHIT.status == True)
  print theHIT
  # NOTE(review): despite the name, this is the HITId, not the HITTypeId.
  hit_type_id = theHIT[0].HITId
  print hit_type_id + '\n'
  #print "https://workersandbox.mturk.com/mturk/preview?groupId={}".format(hit_type_id)

  '''
  f = open('hit_id.txt', 'w')
  f.write(hit_type_id)
  f.close()
  '''

  return hit_type_id
def PostHits():
    """Post the mice bounding-box annotation HIT to MTurk.

    Connects with the module-level ACCESS_ID / SECRET_KEY / HOST, posts a
    three-assignment external-question HIT restricted to US workers with
    a >=90% approval rate, then prints the result and its HITId.
    """
    connection = MTurkConnection(aws_access_key_id=ACCESS_ID,
                                 aws_secret_access_key=SECRET_KEY,
                                 host=HOST)

    question = ExternalQuestion(
        external_url=
        "https://github.com/mice-annotator/mice-annotator.github.io",
        frame_height=675)

    worker_reqs = mtqu.Qualifications()
    worker_reqs.add(
        mtqu.PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 90))
    worker_reqs.add(mtqu.LocaleRequirement("EqualTo", "US"))

    theHIT = connection.create_hit(
        question=question,
        qualifications=worker_reqs,
        title='I love mice so much!',
        description='Draw bounding box for mice in the frame.',
        keywords=['mice', 'bounding box'],
        annotation="mice_bounding_box",
        reward=0.02,                     # USD per assignment
        max_assignments=3,
        lifetime=10 * 60 * 60,           # available for 10 hours
        duration=120 * 60,               # 120 minutes per assignment
        approval_delay=5 * 60 * 60)      # auto-approve after 5 hours

    assert (theHIT.status == True)
    print(theHIT)
    print(theHIT[0].HITId)
Example #8
0
     There is an automatic timeout for each question, \
     but you can push the next button once you are done with the screen. Thank you',
    'frame_height':
    500,
    'preview_template':
    'global/MTurkPreview.html',
    'minutes_allotted_per_assignment':
    40,
    'expiration_hours':
    1,  # 1 day
    #'grant_qualification_id': '3LQV637WQB4JX22NPA62LG08IF76BE',    #sandbox
    'grant_qualification_id':
    '3X03PXFE93BZZPK7U8HT29SECH8OFF',  # real mturk (kristianlopezvargas)
    #'grant_qualification_id': '3SL0IB85URSUSM2RNGYM7CLMXT3JRV',      # real mturk (kecolab)
    'qualification_requirements': [
        qualification.LocaleRequirement("EqualTo", "US"),
        qualification.PercentAssignmentsApprovedRequirement(
            "GreaterThanOrEqualTo", 30),
        qualification.NumberHitsApprovedRequirement("GreaterThanOrEqualTo", 1),
        qualification.Requirement(
            '3X03PXFE93BZZPK7U8HT29SECH8OFF',
            'DoesNotExist'),  # change for sandbox or real mturk
    ]
}

# if you set a property in SESSION_CONFIG_DEFAULTS, it will be inherited by all configs
# in SESSION_CONFIGS, except those that explicitly override it.
# the session config can be accessed from methods in your apps as self.session.config,
# e.g. self.session.config['participation_fee']

SESSION_CONFIG_DEFAULTS = {
Example #9
0
def get_worker_qualifications(approval_rate, total_approved):
    """Assemble worker requirements: US locale plus minimum approval stats.

    Args:
        approval_rate: lower bound (exclusive) on the worker's percentage
            of approved assignments.
        total_approved: lower bound (exclusive) on the worker's count of
            approved HITs.

    Returns:
        qualification.Qualifications: all requirements, each of which must
        be met before the worker can even preview the HIT.
    """
    reqs = qualification.Qualifications()
    for requirement in (
            qualification.PercentAssignmentsApprovedRequirement(
                comparator="GreaterThan",
                integer_value=str(approval_rate),
                required_to_preview=True),
            qualification.LocaleRequirement(
                comparator="EqualTo", locale="US", required_to_preview=True),
            qualification.NumberHitsApprovedRequirement(
                comparator="GreaterThan",
                integer_value=str(total_approved),
                required_to_preview=True)):
        reqs.add(requirement)
    return reqs
Example #10
0
    def make_html_elicitation_HIT(self,
                                  prompt_list,
                                  hit_title,
                                  prompt_title,
                                  keywords,
                                  hit_description,
                                  duration=DEFAULT_DURATION,
                                  reward_per_clip=DEFAULT_REWARD,
                                  max_assignments=DEFAULT_MAX_ASSIGNMENTS):
        """Build an HTML elicitation HIT from ``prompt_list`` and post it.

        Each (prompt_words, prompt_id) pair becomes one question on the
        page: the worker records themselves reading the prompt and pastes
        the recording URL into a text field.  The assembled page is also
        written to a local debug file.

        Args:
            prompt_list: iterable of (prompt_words, prompt_id) pairs,
                where prompt_words is a sequence of words.
            hit_title: title inserted into the page head.
            prompt_title: unused here; kept for interface compatibility.
            keywords: ignored — overwritten with a fixed keyword string
                below (NOTE(review): confirm this shadowing is intended).
            hit_description: HIT description shown to workers.
            duration: seconds a worker has to finish an assignment.
            reward_per_clip: USD per prompt; total reward is
                reward_per_clip * len(prompt_list).
            max_assignments: number of workers allowed per HIT.

        Returns:
            The ``create_hit`` result on success, or False when the
            request failed with reason "OK"; re-raises any other
            MTurkRequestError.
        """
        overview = Overview()
        overview.append_field(
            "Title", "Record yourself speaking the words in the prompt.")
        descriptions = [
            "The following prompts are in English.",
            "Click the prompt to record your voice (Redirects to recording Page).",
            "Follow the directions on that page.",
            "Copy and paste the URL in box below the prompt on this page."
        ]
        # Deliberately shadows the `keywords` parameter (see docstring).
        keywords = "audio, recording, elicitation, English"

        html_head = self.elicitation_head.replace(self.html_tags["title"],
                                                  hit_title)
        # Insert each instruction as a list item, keeping the placeholder
        # tag in place so the next one can be appended after it.
        for description in descriptions:
            html_head = html_head.replace(
                self.html_tags["description"], "<li>" + description +
                "</li>\n" + self.html_tags["description"])

        questions_html = []
        prompt_ids = []
        for prompt_words, prompt_id in prompt_list:
            # For each prompt, generate the question html from the template.
            prompt_id = str(prompt_id)
            prompt = " ".join(prompt_words)
            underscored_prompt = "_".join(prompt_words)
            question = self.elicitation_question.replace(
                self.html_tags["prompt"], prompt)
            question = question.replace(self.html_tags["underscored_prompt"],
                                        underscored_prompt)
            # prompt_id is already a str here (redundant str() removed).
            question = question.replace(self.html_tags["prompt_id"], prompt_id)
            questions_html.append(question)
            prompt_ids.append(prompt_id)

        for prompt_id in prompt_ids:
            # Disable the inputs for the prompts (text fields for the audio
            # recording URLs), again keeping the placeholder tags so each
            # iteration appends after the previous one.
            script = self.disable_input_script.replace("${input_id}",
                                                       prompt_id)
            html_head = html_head.replace(
                self.html_tags["disable_script"],
                script + "\n" + self.html_tags["disable_script"])
            if self.html_tags["prompt_id"] in html_head:
                html_head = html_head.replace(
                    self.html_tags["prompt_id"],
                    "'" + prompt_id + "'," + self.html_tags["prompt_id"])

        # Strip the now-unneeded placeholder tags.
        html_head = html_head.replace(self.html_tags["disable_script"], "")
        html_head = html_head.replace("," + self.html_tags["prompt_id"], "")
        html_head = html_head.replace(self.html_tags["description"], "")

        html = html_head + "".join(questions_html) + self.transcription_tail
        html_question = HTMLQuestion(html, 800)
        # Fix: close the debug file (the original leaked the handle).
        # NOTE(review): hard-coded path — consider making it configurable.
        with open("/home/taylor/csaesr/tmp/hithtml.html", "w") as debug_file:
            debug_file.write(html)

        quals = qualification.Qualifications()
        quals.add(qualification.LocaleRequirement("EqualTo", "US"))
        reward = reward_per_clip * len(prompt_list)
        try:
            return self.conn.create_hit(title=hit_title,
                                        question=html_question,
                                        max_assignments=max_assignments,
                                        description=hit_description,
                                        qualifications=quals,
                                        keywords=keywords,
                                        duration=duration,
                                        reward=reward)
        except MTurkRequestError as e:
            if e.reason != "OK":
                raise
            print(e)
            return False
Example #11
0
    def make_question_form_elicitation_HIT(
            self,
            prompt_list,
            hit_title,
            prompt_title,
            keywords,
            duration=DEFAULT_DURATION,
            reward_per_clip=DEFAULT_REWARD,
            max_assignments=DEFAULT_MAX_ASSIGNMENTS):
        """Post a QuestionForm elicitation HIT (microphone-type question).

        Builds a form containing an overview and a single radio-button
        question asking which microphone the worker uses, then posts it
        via ``self.conn.create_hit``.

        Returns:
            True when the HIT was posted, or when the request failed with
            reason "OK"; re-raises any other MTurkRequestError.
        """
        overview = Overview()
        overview.append_field("Title", hit_title)
        #overview.append(FormattedContent('<a target = "_blank" href="url">hyperlink</a>'))
        question_form = QuestionForm()

        descriptions = [
            "The following prompts are in English.",
            "Approve the flash permissions to record audio.",
            "Click the red circle to record yourself.",
            "Read the words after 'prompt:'", "Click 'Click to Stop'",
            "Play the clip back to verify sound quality.",
            "After you are happy with your recording, click 'Click here to save >>'",
            "Copy & paste the URL under 'Sharing options' into the text field for the prompt.",
            "You will NEVER be asked to divulge any personal or identifying information."
        ]
        # NOTE(review): this shadows the `keywords` parameter, so callers'
        # keywords are ignored — confirm this is intended.
        keywords = "audio, recording, elicitation, English"

        #         for i, description in enumerate(descriptions):
        #             overview.append_field("%dDescription"%i, description)
        #         flash_xml = FlashXml(self.flash_xml.replace(self.html_tags["flash_url"],self.vocaroo_url))
        #         overview.append(flash_xml)
        question_form.append(overview)

        qc = QuestionContent()
        #        qc.append(FormattedContent(flash_xml))

        qc.append_field("Title",
                        "Please select the type of microphone you are using.")
        #         qc.append(Flash(self.vocaroo_url,525,450))
        #
        #answer = FreeTextAnswer()
        answer = SelectionAnswer(max=1,
                                 style="radiobutton",
                                 selections=self.mic_selections)
        q = Question(identifier="MIC",
                     content=qc,
                     answer_spec=AnswerSpecification(answer))
        question_form.append(q)

        # NOTE(review): "in" is not a documented MTurk locale comparator
        # (EqualTo/NotEqualTo/In with locale values) and "USA" is not an
        # ISO 3166 country code ("US" is) — verify this requirement against
        # the boto.mturk.qualification docs.
        qual = qualification.LocaleRequirement("in", "USA")
        reward = reward_per_clip * len(prompt_list)
        # NOTE(review): `xml` and `response` below are computed but never
        # used; `qualification=` is also not boto2's usual `qualifications=`
        # keyword — confirm the wrapper accepts it.
        xml = question_form.get_as_xml()
        try:
            response = self.conn.create_hit(questions=question_form,
                                            max_assignments=1,
                                            title=hit_title,
                                            qualification=qual,
                                            description=descriptions[0],
                                            keywords=keywords,
                                            duration=duration,
                                            reward=reward)
        except MTurkRequestError as e:
            if e.reason != "OK":
                raise
        return True
Example #12
0
def default_qualifications():
    """Default worker requirements: US-based, >95% approval, >10 approved HITs.

    Returns:
        mtqual.Qualifications: the assembled requirement set.
    """
    requirements = mtqual.Qualifications()
    for requirement in (
            mtqual.LocaleRequirement("EqualTo", "US"),
            mtqual.PercentAssignmentsApprovedRequirement("GreaterThan", 95),
            mtqual.NumberHitsApprovedRequirement("GreaterThan", 10)):
        requirements.add(requirement)
    return requirements