Example #1
0
def PostHits_loop():
    """Post one external-question HIT per HSet number in [startHSet, endHSet].

    Relies on module-level globals being loaded before this call (ACCESS_ID,
    SECRET_KEY, HOST, startHSet, endHSet, frameHeight, pay, the various
    HIT-text settings, etc. -- e.g. via `from alvarezlab import *`).
    Prints a worker-preview URL for every HIT that was created.
    """
    # To post a HIT, first connect to Turk using our access codes:
    # The ACCESS_ID and SECRET_KEY are loaded before this function is called
    # (e.g., from alvarezlab import * );
    mtc = MTurkConnection(aws_access_key_id=ACCESS_ID,
                          aws_secret_access_key=SECRET_KEY,
                          host=HOST)

    # One HIT per HSet number; the HSet is passed to the experiment page
    # through the query string so each HIT shows a different stimulus set.
    for x in range(startHSet, endHSet + 1):
        HSet = x
        urlForHIT = "https://scorsese.wjh.harvard.edu/turk/experiments/cfm/Search10/index_cmtrial_Search10.html?HSetNum=%d" % HSet

        # Now lets setup a structure for our external HIT. We need the URL we want to be
        # shown within the Turk window and also how tall we want the Turk iframe to be:
        q = ExternalQuestion(external_url=urlForHIT, frame_height=frameHeight)

        # And any qualifications we want people to have:
        qualifications = mtqu.Qualifications()

        qualifications.add(
            mtqu.PercentAssignmentsApprovedRequirement(
                'GreaterThanOrEqualTo', percentAssignmentsApprovedRequirement))
        qualifications.add(mtqu.LocaleRequirement("EqualTo",
                                                  localeRequirement))
        # Optionally restrict to workers holding a custom qualification
        # ("NONE" is the sentinel for "no custom qualification required").
        if (qualificationID != "NONE"):
            qualifications.add(
                mtqu.Requirement(qualificationID, "EqualTo", 1,
                                 notifyWorkerOfQualification))

        # Post:
        theHIT = mtc.create_hit(
            question=q,
            lifetime=minToSec(minutesBeforeHitExpires),
            max_assignments=numAssignmentsToPost,
            title=titleForWorkers,
            description=descriptionForWorkers,
            keywords=keywordsForWorkers,
            qualifications=qualifications,
            reward=pay,
            duration=minToSec(minutesForUsersToFinish),
            approval_delay=minToSec(minutesBeforeAutoApproved),
            annotation=projectNameForRequesters)

        # get more info about the hit (needed for the HITGroupId below)
        hit = mtc.get_hit(theHIT[0].HITId)

        # Print out the HIT's ID if all went well:
        # pprint(vars(hit[0]))
        # print "Experiment info:"
        # print HOST
        print "preview hit in HSet ", HSet, ": "
        # print urlForHIT, "\n"
        # print "HITId"
        # print theHIT[0].HITId, "\n"
        print PREVIEW + "?groupId=" + hit[0].HITGroupId, "\n"
Example #2
0
File: turk.py  Project: dpfried/phrasenode
def standard_quals(debug):
    """Build the MTurk qualification set for this task.

    Args:
        debug (bool): when True, gate the HIT behind a private qualification
            that only our own accounts hold (so the public cannot see it);
            otherwise require US locale, >95% approval rate, and >10
            approved HITs.
    """
    quals = mtqu.Qualifications()
    if debug:
        private_gate = mtqu.Requirement(
            "3B9TX86P8NTZUJU62N2SLJ1DZCERL5", "EqualTo", 100,
            required_to_preview=True)
        quals.add(private_gate)
        return quals
    for requirement in (
            mtqu.LocaleRequirement("EqualTo", "US", required_to_preview=True),
            mtqu.PercentAssignmentsApprovedRequirement("GreaterThan", 95),
            mtqu.NumberHitsApprovedRequirement("GreaterThan", 10)):
        quals.add(requirement)
    return quals
Example #3
0
def PostHits():
    """Post the Cont_LTM_90 color-memory experiment HIT to MTurk.

    Connection credentials (ACCESS_ID, SECRET_KEY, HOST) are module-level
    globals. Prints the created HIT object and its HITId on success.
    """
    mtc = MTurkConnection(aws_access_key_id=ACCESS_ID,
                          aws_secret_access_key=SECRET_KEY,
                          host=HOST)

    # The experiment itself is hosted externally and shown in an iframe.
    q = ExternalQuestion(
        external_url="https://paulscotti.github.io/mturk/ContLTMBlocked90",
        frame_height=675)
    keywords = [
        'memory', 'psychology', 'game', 'attention', 'experiment', 'research'
    ]
    title = 'Memorize the colors of objects! (Psychology Experiment, 1.5 hours)'
    experimentName = 'Cont_LTM_90'
    description = 'Research study involving color memory.'
    pay = 9.00

    # Worker filters: high approval rate, experienced, US-based, no repeat
    # participants, Masters only.
    qualifications = mtqu.Qualifications()
    qualifications.add(
        mtqu.PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 98))
    qualifications.add(
        mtqu.NumberHitsApprovedRequirement('GreaterThanOrEqualTo', 1000))
    qualifications.add(mtqu.LocaleRequirement("EqualTo", "US"))
    qualifications.add(
        mtqu.Requirement(
            '38XLDN1M8DBWG1FPHU43ZCVTZ4T3DT', 'DoesNotExist', '',
            'DiscoverPreviewAndAccept'))  # No prior workers of ours
    qualifications.add(
        mtqu.Requirement('2F1QJWKUDD8XADTFD2Q0G6UTO95ALH', 'Exists', '',
                         'DiscoverPreviewAndAccept'))  # Masters only

    theHIT = mtc.create_hit(
        question=q,
        lifetime=2 * 60 * 60,  # 2 hours
        max_assignments=1,  #needs to be less than 10 else additional fees
        title=title,
        description=description,
        keywords=keywords,
        qualifications=qualifications,
        reward=pay,
        duration=180 *
        60,  #3 hours (HIT won't be accepted if they go over time)
        approval_delay=1 * 60 * 60,  # 1 hours
        annotation=experimentName)

    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust for validating the API response.
    assert (theHIT.status == True)
    print theHIT
    print theHIT[0].HITId
Example #4
0
def post_hits(hit_info, n_sub_hits):
    """Create an external-question HIT on MTurk and log it to a local record.

    Args:
        hit_info (dict): HIT configuration; must contain 'external_url',
            'frame_height', 'approval_rating_cutoff', 'lifetime_of_experiment',
            'title', 'description', 'keywords', 'payment_for_experiment',
            'duration_of_experiment', 'approval_delay', 'experiment_name'.
            On success the keys 'hit_id' and 'hit_url' are added in place.
        n_sub_hits (int): number of assignments to post for this HIT.

    Raises:
        AssertionError: if MTurk reports an unsuccessful create_hit call.

    Side effects: appends hit_info under a fresh 'submission_N' key in
    'HIT_submission_records_<context>.npy' in the working directory.
    """
    mtc = MTurkConnection(aws_access_key_id=access_id,
                          aws_secret_access_key=secret_key,
                          host=host)
    q = ExternalQuestion(external_url=hit_info['external_url'],
                         frame_height=hit_info['frame_height'])
    qualifications = mtqu.Qualifications()
    qualifications.add(
        mtqu.PercentAssignmentsApprovedRequirement(
            'GreaterThanOrEqualTo', hit_info['approval_rating_cutoff']))
    qualifications.add(mtqu.LocaleRequirement("EqualTo", "US"))

    print('url:', hit_info['external_url'], n_sub_hits)

    the_HIT = mtc.create_hit(question=q,
                             lifetime=hit_info['lifetime_of_experiment'],
                             max_assignments=n_sub_hits,
                             title=hit_info['title'],
                             description=hit_info['description'],
                             keywords=hit_info['keywords'],
                             qualifications=qualifications,
                             reward=hit_info['payment_for_experiment'],
                             duration=hit_info['duration_of_experiment'],
                             approval_delay=hit_info['approval_delay'],
                             annotation=hit_info['experiment_name'])

    assert (the_HIT.status == True)

    hit_info['hit_id'] = the_HIT[0].HITId
    hit_url = "{}{}".format(base_url, the_HIT[0].HITTypeId)
    hit_info['hit_url'] = hit_url

    record_name = 'HIT_submission_records_%s.npy' % (context)

    # os.path.exists replaces the previous os.listdir(os.getcwd()) membership
    # scan -- same check, without listing the whole directory.
    if not os.path.exists(record_name):
        turk_info = {}
    else:
        # allow_pickle=True is required to load a pickled dict with
        # numpy >= 1.16.3, where the default flipped to False for security.
        turk_info = np.load(record_name, allow_pickle=True).item()

    key_name = 'submission_%d' % len(turk_info.keys())
    turk_info[key_name] = hit_info
    np.save(record_name, turk_info)

    print('HIT_ID:', the_HIT[0].HITId, 'key_name', key_name,
          "\nwhich you can see here:", hit_url)
def PostHits(pay, lifetime, max_assignments, exp):
  """Post the decision-making experiment HIT and return its HITId.

  Args:
    pay: reward per assignment (passed straight to create_hit).
    lifetime: seconds the HIT stays discoverable on MTurk.
    max_assignments: number of assignments to post.
    exp: experiment directory name appended to the external URL.

  Returns:
    The HITId string of the created HIT.
  """
  mtc = MTurkConnection(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,host=HOST)
  
  
  q = ExternalQuestion(external_url = "https://davebraun.org/dissertation/experiments/production/" + exp +"/", frame_height=675)
  keywords = ['attention', 'psychology', 'experiment', 'research']
  title = 'A Decision Making Experiment'
  experimentName = 'Decision Making Experiment' ## this is NOT what it ends up getting called on my server
  description = 'This HIT will take about 30 mins to complete. All HITS in this batch are the same, and you will only be able to perform one of the HITS in this batch.'
  
  # Worker filters: >=90% approval rate, US locale.
  qualifications = mtqu.Qualifications()
  qualifications.add(mtqu.PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 90))
  qualifications.add(mtqu.LocaleRequirement("EqualTo", "US"))
  #qualifications.add(mtqu.Requirement("2Z046OQ1SNQQREGXAFSQPCNR1605PN"))

  theHIT = mtc.create_hit(question=q,
                          lifetime=lifetime,
                          max_assignments=max_assignments,
                          title=title,
                          description=description,
                          keywords=keywords,
                          qualifications=qualifications,
                          reward=pay,
                          duration=120 * 60, # 120 minutes
                          approval_delay=2* 24 * 60 * 60, # the number of seconds after an assignment is submitted is it automatically approved unless explicitly rejected
                          ## the norm is to try to keep this under 7 days, many requesters approve in less than 3 days
                          annotation=experimentName)

  assert(theHIT.status == True)
  print theHIT
  # NOTE(review): despite the name, this variable holds the HITId, not the
  # HITTypeId (cf. the commented-out preview URL below, which wants a group
  # id) -- confirm which id downstream consumers expect.
  hit_type_id = theHIT[0].HITId
  print hit_type_id + '\n'
  #print "https://workersandbox.mturk.com/mturk/preview?groupId={}".format(hit_type_id)

  '''
  f = open('hit_id.txt', 'w')
  f.write(hit_type_id)
  f.close()
  '''

  return hit_type_id
Example #6
0
def ManualReward():
    """Post a one-off compensation HIT visible only to a pre-qualified worker.

    The target worker must already hold the custom qualification
    '35GMP5037Q9KDB285EZ1DGC38JE39C' (assigned via assign_qualification, see
    the commented-out line below). Prints the created HIT and its HITId.
    """
    mtc = MTurkConnection(aws_access_key_id=ACCESS_ID,
                          aws_secret_access_key=SECRET_KEY,
                          host=HOST)

    q = ExternalQuestion(
        external_url="https://paulscotti.github.io/mturk/RewardPage",
        frame_height=675)
    keywords = ['compensation', 'experiment', 'research']
    title = 'Compensation for Cont_LTM color experiment'
    experimentName = 'Cont_LTM_Reward'
    description = 'Compensating a previous worker of the Cont_LTM study.'
    pay = 10.27

    #for adding new people
    #mtc.assign_qualification('35GMP5037Q9KDB285EZ1DGC38JE39C','A1N1EF0MIRSEZZ');

    # Only workers holding the custom qualification can see and accept.
    qualifications = mtqu.Qualifications()
    qualifications.add(
        mtqu.Requirement('35GMP5037Q9KDB285EZ1DGC38JE39C', 'Exists', '',
                         'DiscoverPreviewAndAccept'))

    rewardHIT = mtc.create_hit(
        question=q,
        lifetime=12 * 60 * 60,  #12 hours
        max_assignments=1,  #needs to be less than 10 else additional fees
        title=title,
        description=description,
        keywords=keywords,
        qualifications=qualifications,
        reward=pay,
        duration=10 *
        60,  #10 minutes (HIT won't be accepted if they go over time)
        approval_delay=12 * 60 * 60,  # 12 hours
        annotation=experimentName)

    assert (rewardHIT.status == True)
    print rewardHIT
    print rewardHIT[0].HITId
def PostHits():
    """Post the mice bounding-box annotation HIT to MTurk.

    Credentials (ACCESS_ID, SECRET_KEY, HOST) come from module-level globals.
    Prints the created HIT object and its HITId.
    """
    connection = MTurkConnection(aws_access_key_id=ACCESS_ID,
                                 aws_secret_access_key=SECRET_KEY,
                                 host=HOST)

    # q = ExternalQuestion(external_url = "https://timbrady.org/turk/monthnumber/", frame_height=675)
    question = ExternalQuestion(
        external_url=
        "https://github.com/mice-annotator/mice-annotator.github.io",
        frame_height=675)

    # Worker filters: >=90% approval rate, US locale.
    quals = mtqu.Qualifications()
    quals.add(
        mtqu.PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 90))
    quals.add(mtqu.LocaleRequirement("EqualTo", "US"))
    #qualifications.add(mtqu.Requirement("2Z046OQ1SNQQREGXAFSQPCNR1605PN"))

    posted = connection.create_hit(
        question=question,
        lifetime=10 * 60 * 60,  # 10 hours
        max_assignments=3,
        title='I love mice so much!',
        description='Draw bounding box for mice in the frame.',
        keywords=['mice', 'bounding box'],
        qualifications=quals,
        reward=0.02,
        duration=120 * 60,  # 120 minutes
        approval_delay=5 * 60 * 60,  # 5 hours
        annotation="mice_bounding_box")

    assert (posted.status == True)
    print(posted)
    print(posted[0].HITId)
Example #8
0
def get_worker_qualifications(approval_rate, total_approved):
    """Return worker requirements: approval %, US locale, approved-HIT count.

    All three requirements are enforced at preview time
    (required_to_preview=True).
    """
    requirements = qualification.Qualifications()
    approval_req = qualification.PercentAssignmentsApprovedRequirement(
        comparator="GreaterThan",
        integer_value=str(approval_rate),
        required_to_preview=True)
    locale_req = qualification.LocaleRequirement(
        comparator="EqualTo",
        locale="US",
        required_to_preview=True)
    count_req = qualification.NumberHitsApprovedRequirement(
        comparator="GreaterThan",
        integer_value=str(total_approved),
        required_to_preview=True)
    for req in (approval_req, locale_req, count_req):
        requirements.add(req)
    return requirements
Example #9
0
    def make_html_elicitation_HIT(self,
                                  prompt_list,
                                  hit_title,
                                  prompt_title,
                                  keywords,
                                  hit_description,
                                  duration=DEFAULT_DURATION,
                                  reward_per_clip=DEFAULT_REWARD,
                                  max_assignments=DEFAULT_MAX_ASSIGNMENTS):
        """Build and post an HTML elicitation HIT from a list of prompts.

        Args:
            prompt_list: iterable of (prompt_words, prompt_id) pairs; each
                prompt becomes one recording question in the HIT.
            hit_title: title substituted into the HTML head template.
            prompt_title: NOTE(review): unused in this method -- confirm
                before removing from the signature.
            keywords: NOTE(review): immediately overwritten by a hard-coded
                keyword string below; kept for interface compatibility.
            hit_description: description passed to create_hit.
            duration: HIT duration in seconds.
            reward_per_clip: per-prompt reward; total reward is
                reward_per_clip * len(prompt_list).
            max_assignments: number of assignments to post.

        Returns:
            The create_hit result on success, or False when MTurk answers
            with a failure whose reason is "OK" (treated as a soft failure);
            any other MTurkRequestError is re-raised.
        """
        # NOTE(review): overview is built but never attached to the HIT.
        overview = Overview()
        overview.append_field(
            "Title", "Record yourself speaking the words in the prompt.")
        descriptions = [
            "The following prompts are in English.",
            "Click the prompt to record your voice (Redirects to recording Page).",
            "Follow the directions on that page.",
            "Copy and paste the URL in box below the prompt on this page."
        ]
        keywords = "audio, recording, elicitation, English"

        # Fill in the head template: title, then one <li> per description
        # line (each replace re-inserts the tag so the next line can follow).
        html_head = self.elicitation_head.replace(self.html_tags["title"],
                                                  hit_title)
        for description in descriptions:
            html_head = html_head.replace(
                self.html_tags["description"], "<li>" + description +
                "</li>\n" + self.html_tags["description"])
        questions_html = []
        prompt_ids = []

        for prompt_words, prompt_id in prompt_list:
            #For each prompt, generate the question html given the template
            prompt_id = str(prompt_id)
            prompt = " ".join(prompt_words)
            underscored_prompt = "_".join(prompt_words)
            question = self.elicitation_question.replace(
                self.html_tags["prompt"], prompt)
            question = question.replace(self.html_tags["underscored_prompt"],
                                        underscored_prompt)
            question = question.replace(self.html_tags["prompt_id"],
                                        str(prompt_id))
            questions_html.append(question)
            prompt_ids.append(prompt_id)

        for prompt_id in prompt_ids:
            #Disable the inputs for the prompts, which are just text fields for the
            #audio recording URLs
            script = self.disable_input_script.replace("${input_id}",
                                                       prompt_id)
            html_head = html_head.replace(self.html_tags["disable_script"],script+\
                                          "\n"+self.html_tags["disable_script"])
            if (self.html_tags["prompt_id"]) in html_head:
                html_head = html_head.replace(self.html_tags["prompt_id"],"'"+prompt_id+"'"+\
                                              ","+self.html_tags["prompt_id"])
        #Get rid of the leftover template tags
        html_head = html_head.replace(self.html_tags["disable_script"], "")
        html_head = html_head.replace("," + self.html_tags["prompt_id"], "")
        html_head = html_head.replace(self.html_tags["description"], "")
        html = html_head

        for question in questions_html:
            html += question

        html += self.transcription_tail
        html_question = HTMLQuestion(html, 800)
        # Debug copy of the generated page; `with` guarantees the handle is
        # closed (the original open(...).write(...) leaked it).
        with open("/home/taylor/csaesr/tmp/hithtml.html", "w") as debug_file:
            debug_file.write(html)

        #reward calculation: per-clip rate times number of prompts
        quals = qualification.Qualifications()
        quals.add(qualification.LocaleRequirement("EqualTo", "US"))
        reward = reward_per_clip * len(prompt_list)
        try:
            return self.conn.create_hit(title=hit_title,
                                        question=html_question,
                                        max_assignments=max_assignments,
                                        description=hit_description,
                                        qualifications=quals,
                                        keywords=keywords,
                                        duration=duration,
                                        reward=reward)
        except MTurkRequestError as e:
            if e.reason != "OK":
                raise
            else:
                print(e)
                return False
Example #10
0
def default_qualifications():
    """Baseline worker filter: US locale, >95% approval, >10 approved HITs."""
    requirements = mtqual.Qualifications()
    for requirement in (
            mtqual.LocaleRequirement("EqualTo", "US"),
            mtqual.PercentAssignmentsApprovedRequirement("GreaterThan", 95),
            mtqual.NumberHitsApprovedRequirement("GreaterThan", 10)):
        requirements.add(requirement)
    return requirements