def configure_hit(self, hit_config):
    """Configure HIT.

    Builds the ExternalQuestion, worker qualifications, and HIT type from
    *hit_config*, optionally registers REST notifications, and stores every
    HIT-creation parameter in ``self.param_dict`` for later use.

    :param hit_config: dict with keys ``ad_location``, ``approve_requirement``,
        ``us_only``, ``title``, ``description``, ``reward``, ``duration``,
        ``keywords``, ``lifetime``, ``max_assignments``.
    """
    # configure question_url based on the id
    experiment_portal_url = hit_config['ad_location']
    frame_height = 600
    mturk_question = ExternalQuestion(experiment_portal_url, frame_height)

    # Qualification:
    quals = Qualifications()
    approve_requirement = hit_config['approve_requirement']
    quals.add(PercentAssignmentsApprovedRequirement(
        "GreaterThanOrEqualTo", approve_requirement))
    if hit_config['us_only']:
        quals.add(LocaleRequirement("EqualTo", "US"))

    # Create a HIT type for this HIT.
    hit_type = self.mtc.register_hit_type(
        hit_config['title'],
        hit_config['description'],
        hit_config['reward'],
        hit_config['duration'],
        keywords=hit_config['keywords'],
        approval_delay=None,
        qual_req=None)[0]

    # Check the config file to see if notifications are wanted.
    config = PsiturkConfig()
    config.load_config()
    try:
        url = config.get('Server Parameters', 'notification_url')
        all_event_types = [
            "AssignmentAccepted",
            "AssignmentAbandoned",
            "AssignmentReturned",
            "AssignmentSubmitted",
            "HITReviewable",
            "HITExpired",
        ]
        self.mtc.set_rest_notification(hit_type.HITTypeId, url,
                                       event_types=all_event_types)
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Notifications stay best-effort — a missing
        # notification_url or an API failure must not block HIT setup.
        pass

    # Specify all the HIT parameters
    self.param_dict = dict(
        hit_type=hit_type.HITTypeId,
        question=mturk_question,
        lifetime=hit_config['lifetime'],
        max_assignments=hit_config['max_assignments'],
        title=hit_config['title'],
        description=hit_config['description'],
        keywords=hit_config['keywords'],
        reward=hit_config['reward'],
        duration=hit_config['duration'],
        approval_delay=None,
        questions=None,
        qualifications=quals,
        response_groups=['Minimal', 'HITDetail', 'HITQuestion',
                         'HITAssignmentSummary'])
def generate_hits(mtc_type, subset, begin_index, args):
    """Create an interestingness-rating HIT for the image URLs in *subset*.

    Depending on ``args.m`` this either publishes a regular batch (writing an
    index file of the covered URLs), bootstraps a qualification type
    (``'qua_init'``), or restricts the HIT to a pre-granted qualification
    (``'qua'``, which requires ``args.q``).

    :param mtc_type: connection type passed through to ``mtk_utils.get_mtc``.
    :param subset: iterable of image URLs to ask about.
    :param begin_index: batch offset, used in the index filename.
    :param args: parsed CLI args; uses ``args.m``, ``args.o`` and ``args.q``.
    """
    from boto.mturk.connection import MTurkConnection
    from boto.mturk.question import QuestionContent, Question, QuestionForm, \
        Overview, AnswerSpecification, SelectionAnswer, FormattedContent, \
        FreeTextAnswer
    from boto.mturk.qualification import \
        PercentAssignmentsApprovedRequirement, Qualifications, Requirement

    mtc = mtk_utils.get_mtc(mtc_type)

    title = 'Give your opinion of interestingness level about images'
    description = ('Watch images and give us your opinion of interestingness level about the images')
    keywords = 'image, interestingness, interesting, rating, opinions'
    ratings = [('Very boring', '-2'),
               ('Boring', '-1'),
               ('Neutral', '0'),
               ('Interesting', '1'),
               ('Very interesting', '2')]

    # --------------- BUILD OVERVIEW -------------------
    overview = Overview()
    overview.append_field('Title', 'Give your opinion about interestingness level on those images')

    # --------------- BUILD QUESTIONs -------------------
    questions = []
    if args.m != 'qua':
        # Record which images this batch covers so answers can be joined later.
        utils.write_file(subset, args.o + '.index_' + str(begin_index) + '.txt')
    if args.m == 'qua_init' and begin_index > 0:
        # Qualification bootstrap only needs the first batch.
        return
    for image_url in subset:
        qc = QuestionContent()
        qc.append_field('Title', 'How interesting the image to you?')
        qc.append(FormattedContent('<img src="' + image_url + '" alt="image" />'))
        fta = SelectionAnswer(min=1, max=1, style='dropdown',
                              selections=ratings, type='text', other=False)
        q = Question(identifier='interestingness',
                     content=qc,
                     answer_spec=AnswerSpecification(fta),
                     is_required=True)
        questions.append(q)

    # --------------- BUILD THE QUESTION FORM -------------------
    question_form = QuestionForm()
    question_form.append(overview)
    for question in questions:
        question_form.append(question)

    # BUILD QUALIFICATION
    qualifications = Qualifications()
    req = PercentAssignmentsApprovedRequirement(comparator="GreaterThan",
                                                integer_value="95")
    qualifications.add(req)
    if args.m == 'qua':
        if args.q is not None:  # fixed: identity comparison with None (was `!= None`)
            qua_req = Requirement(qualification_type_id=args.q,
                                  comparator='EqualTo',
                                  integer_value='1')
            qualifications.add(qua_req)
        else:
            print("Please give qualification type id in 'qua' mode.")
            sys.exit(1)  # fixed: exit non-zero on a usage error (was sys.exit(0))

    # --------------- CREATE THE HIT -------------------
    hit = mtc.create_hit(questions=question_form,
                         qualifications=qualifications,
                         max_assignments=10,
                         title=title,
                         description=description,
                         keywords=keywords,
                         duration=60 * 30 * 2,
                         reward=0.2)
    if args.m == 'qua_init':
        print("Create qualification type for HIT id: " + hit[0].HITId)
        quatype = mtc.create_qualification_type(
            name=hit[0].HITId,
            description="Temporary qualification for HIT " + hit[0].HITId,
            status='Active')
        print("Qualification type id: " + quatype[0].QualificationTypeId)
'tests',
'tests.demo',
]

# HIT configuration used when sessions are published to Amazon Mechanical
# Turk.  NOTE(review): the key names below look like the oTree
# mturk_hit_settings contract — confirm against the framework docs before
# renaming anything.
mturk_hit_settings = {
    'keywords': ['easy', 'bonus', 'choice', 'study'],
    'title': 'Title for your experiment',
    'description': 'Description for your experiment',
    # Height (px) of the iframe the worker sees.
    'frame_height': 500,
    'preview_template': 'global/MTurkPreview.html',
    'minutes_allotted_per_assignment': 60,
    'expiration_hours': 7*24,  # 7 days
    # to prevent retakes
    'grant_qualification_id': 'YOUR_QUALIFICATION_ID_HERE',
    # Workers must satisfy ALL of these to accept the HIT.
    'qualification_requirements': [
        LocaleRequirement("EqualTo", "US"),
        PercentAssignmentsApprovedRequirement("GreaterThanOrEqualTo", 50),
        NumberHitsApprovedRequirement("GreaterThanOrEqualTo", 5),
        # Requirement('YOUR_QUALIFICATION_ID_HERE', 'DoesNotExist'),
    ]
}

# Defaults applied to every session config unless overridden per-session.
SESSION_CONFIG_DEFAULTS = {
    'real_world_currency_per_point': 0.01,
    'participation_fee': 10.00,
    'num_bots': 12,
    'doc': "",
    'mturk_hit_settings': mturk_hit_settings,
}
from boto.mturk.qualification import (Qualifications,
                                      PercentAssignmentsApprovedRequirement,
                                      NumberHitsApprovedRequirement)

# Shared worker-qualification set: approval percentage strictly above 0.
# The number-of-HITs-approved requirement is currently disabled.
qualifications = Qualifications()
qual_1 = PercentAssignmentsApprovedRequirement(
    comparator="GreaterThan",
    integer_value="0")
# qual_2 = NumberHitsApprovedRequirement(
#     comparator="GreaterThan",
#     integer_value="0")
qualifications.add(qual_1)
# qualifications.add(qual_2)

# Property bundle for the Yes/No annotation-vetting HIT.
YesNoHitProperties = {
    "title": "Choose good annotations of the following category. 5 categories",
    "description": "LabelMeLite Yes/No Tool",
    "keywords": "image,annotation",
    "reward": 0.05,
    "duration": 60*10,  # presumably seconds (10 min) per boto convention — confirm
    "frame_height": 800,
    "max_assignments": 5,
    "country": ["US", "DE"],
    "qualifications": qualifications
}

# Property bundle for the annotation-refinement (Edit) HIT.
# NOTE: this dict literal continues beyond the end of this chunk.
EditHitProperties = {
    "title": "Refine annotations of the following category",
    "description": "LabelMeLite Edit Tool",
    "keywords": "image,annotation",
def send_hit(self):
    """Create the MTurk HIT for this task and record its AWS HIT id.

    Builds the worker qualifications from the instance's ``qualification_*``
    fields, wraps the task URL in an ExternalQuestion, creates the HIT
    through ``self.connection``, then stores the id via ``self.update()``.
    """
    # First check for qualifications
    qualifications = Qualifications()
    # An AdultRequirement is always added (EqualTo 1 when adult content,
    # EqualTo 0 otherwise), so the requirement list is never empty.
    qualifications.add(
        AdultRequirement("EqualTo", 1 if self.qualification_adult else 0))
    if self.qualification_custom not in [None, ""]:
        qualifications.add(
            Requirement(self.qualification_custom,
                        self.qualification_custom_operator,
                        self.qualification_custom_value,
                        required_to_preview=True))
    if self.qualification_number_hits_approved is not None:
        qualifications.add(NumberHitsApprovedRequirement(
            "GreaterThan", self.qualification_number_hits_approved))
    if self.qualification_percent_assignments_approved is not None:
        qualifications.add(PercentAssignmentsApprovedRequirement(
            "GreaterThan", self.qualification_percent_assignments_approved))
    if self.qualification_locale != 'None':
        qualifications.add(
            LocaleRequirement("EqualTo", self.qualification_locale))

    # Domain name must be https
    url = "%s/turk/%s" % (DOMAIN_NAME, self.id)
    frame_height = 900
    questionform = ExternalQuestion(url, frame_height)

    # Fixed: previously two near-identical create_hit() calls were duplicated
    # across an if/else that differed only in whether `qualifications` was
    # passed.  Build the keyword set once and add `qualifications` when any
    # requirement exists (which, given the AdultRequirement above, is always).
    hit_kwargs = dict(
        title=self.title,
        description=self.description,
        keywords=self.keywords,
        # Durations are stored in hours; timedelta's first argument is days.
        duration=datetime.timedelta(self.assignment_duration_in_hours / 24.0),
        lifetime=datetime.timedelta(self.lifetime_in_hours / 24.0),
        max_assignments=self.max_assignments,
        question=questionform,
        reward=Price(amount=self.reward),
        response_groups=('Minimal', 'HITDetail'))
    if len(qualifications.requirements) > 0:
        hit_kwargs['qualifications'] = qualifications
    result = self.connection.create_hit(**hit_kwargs)[0]

    # Update our hit object with the aws HIT
    self.mturk_id = result.HITId
    # When we generate the hit, we won't have any assignments to update
    self.update(mturk_hit=result)
def make_hit(image_url):
    """Publish a HIT asking workers to classify the location in *image_url*.

    The form shows instructions plus the image, then four checkbox questions:
    indoors/outdoors, man-made/natural, and two optional follow-ups on the
    specific kind of man-made or natural location.  The HIT is annotated
    with the image URL and restricted to experienced, well-approved workers.
    """
    title = 'Label image with its location'
    description = 'Answer questions about an image to label its location.'
    keywords = 'image categorization, locations, scene recognition'

    in_out = [('indoors', '0'), ('outdoors', '1')]
    nat_manmade = [('man-made', '0'), ('natural', '1')]
    functions = [('transportation/urban', '0'), ('restaurant', '1'),
                 ('recreation', '2'), ('domestic', '3'),
                 ('work/education', '4'), ('other/unclear', '5')]
    landscapes = [('body of water/beach', '0'), ('field', '1'),
                  ('mountain', '2'), ('forest/jungle', '3'),
                  ('other/unclear', '4')]

    # Overview: title, HTML instructions, and the image itself.
    overview = Overview()
    overview.append_field('Title', title)
    with open(INSTRUCTIONS_HTML) as html:
        instructions = html.read()
    overview.append(FormattedContent(instructions))
    overview.append(Binary('image', None, image_url, 'image'))

    # (identifier, prompt, selections, min selections, is_required) —
    # the optional follow-ups allow zero selections and are not required.
    specs = [
        ('Question 1',
         'Is the location shown in the image indoors or outdoors?',
         in_out, 1, True),
        ('Question 2',
         'Is the location shown in the image man-made or '
         'natural? Examples of man-made locations include '
         'buildings and parks while examples of natural '
         'locations include mountains and rivers.',
         nat_manmade, 1, True),
        ('Question 3',
         'If the location in the image is man-made, what is the '
         'general function or type of the location? If the '
         'location is natural (not man-made), don\'t select '
         'anything here.',
         functions, 0, False),
        ('Question 4',
         'If the location in the picture is natural, what '
         'kind of natural location is it? If the location '
         'man-made (not natural), don\'t select anything here.',
         landscapes, 0, False),
    ]

    question_form = QuestionForm()
    question_form.append(overview)
    for identifier, prompt, selections, min_sel, required in specs:
        content = QuestionContent()
        content.append_field('Text', prompt)
        answer = SelectionAnswer(min=min_sel, max=1, style='checkbox',
                                 selections=selections, type='text',
                                 other=False)
        question_form.append(Question(identifier=identifier,
                                      content=content,
                                      answer_spec=AnswerSpecification(answer),
                                      is_required=required))

    # Restrict to workers with >= 95% approval over >= 200 approved HITs.
    quals = Qualifications()
    quals.add(PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 95))
    quals.add(NumberHitsApprovedRequirement('GreaterThanOrEqualTo', 200))

    mtc.create_hit(questions=question_form,
                   max_assignments=1,
                   title=title,
                   description=description,
                   keywords=keywords,
                   qualifications=quals,
                   annotation=image_url,
                   duration=60 * 10,
                   reward=0.03)
def create_hit(settings):
    """Interactively create an MTurk HIT from *settings* and persist its ids.

    Uses module-level state: ``hit``/``hit_modes`` (per-mode HIT records),
    ``mode`` ('production' or sandbox), ``dialogue_mode`` ('verbose' prompts
    the user via raw_input; otherwise defaults are used), ``settings_raw``,
    ``defaultNSs``, ``defaultDuration``, ``mtc`` (MTurk connection),
    ``HOST_requester``/``HOST_worker``, and ``logger``.

    Side effects: prompts on stdin, creates the HIT via the MTurk API,
    writes hit_modes.json, and logs the action.  Exits the process on a
    pre-existing HIT, declined confirmation, or an MTurkRequestError.
    """
    global hit
    ## make sure there isn't already a hit
    if (hit is not None):
        sys.exit("Error: it looks like you already created the hit in %s mode (HIT ID stored in hit_modes.json)" % mode)
    hit_quals = Qualifications()
    settings_quals = settings["qualifications"]
    ## TODO: master worker, custom quals, utility for creating qualifications?
    if (settings_quals):
        # Only the two supported qualification kinds are honored here.
        if settings_quals["location"]:
            hit_quals.add(LocaleRequirement("EqualTo", settings_quals["location"]))
        if settings_quals["approval_percentage"]:
            hit_quals.add(PercentAssignmentsApprovedRequirement("GreaterThanOrEqualTo", settings_quals["approval_percentage"]))
    prints(
        "Your settings are:",
        "",
        dict_str(settings_raw)
    )
    ##if "http:" in settings["url"]:
    ##    sys.exit("Error: inside settings.json, your url is set to use 'http:'. It needs to use 'https:'")
    ## todo: some static analysis
    if dialogue_mode=="verbose":
        prints(
            "",
            "Are these settings okay? (yes/no)")
        confirm_settings = raw_input("> ")
    else:
        confirm_settings = "yes"
    # Any answer containing an "n" (e.g. "no") aborts.
    if "n" in confirm_settings:
        sys.exit()
    if dialogue_mode=="verbose":
        prints(
            "",
            "How many assignments do you want to start with?",
            "(you can always add more later using cosub add)")
        # Re-prompt until the user enters a parseable integer.
        max_assignments = None
        while max_assignments is None:
            try:
                max_assignments = int(raw_input("> "))
            except ValueError:
                prints("Couldn't understand answer. Try entering a positive integer (e.g., 20)")
    else:
        prints("You will start with " + str(defaultNSs) + " assignments.")
        max_assignments = defaultNSs
    if mode == "production":
        reward = settings["reward"]
        cost = max_assignments * float(reward)
        # MTurk fee tier: 40% for 10+ assignments, else 20%.
        fee = 0.4 if max_assignments > 9 else 0.2
        fee_str = "40%" if fee == 0.4 else "20%"
        # NOTE(review): %.2f requires a numeric reward; a string value from
        # settings.json would raise here — confirm upstream parsing.
        prints(
            "The cost will be $%.2f -- %s assignments * $%.2f/assignment + %s fee" % (cost, max_assignments, reward, fee_str)
        )
        if dialogue_mode=="verbose":
            prints("Is this okay? (yes/no)")
            confirm_cost = raw_input("> ")
            if "n" in confirm_cost:
                sys.exit()
    else:
        print("(This won't cost anything because you're in sandbox mode)")
    ## TODO: implement bounds checking for assignments
    if dialogue_mode=="verbose":
        prints(
            "",
            "How long do you want to collect data for?",
            "You can give an answer in seconds, minutes, hours, days, or weeks.",
            "(and you can always add more time using cosub add)")
        # Re-prompt until timeparse() yields a truthy number of seconds.
        lifetime_seconds = None
        while lifetime_seconds is None:
            lifetime = raw_input("> ")
            lifetime_seconds = timeparse(lifetime)
            if not lifetime_seconds:
                prints("Couldn't understand answer; try an answer in one of these formats:",
                       " 2 weeks",
                       " 3 days",
                       " 12 hours",
                       " 30 minutes")
    else:
        prints("You will collect data for " + defaultDuration + ".")
        lifetime_seconds = timeparse(defaultDuration)
    ## TODO: implement bounds checking for time (30 seconds - 1 year)
    prints("","Creating HIT...","")
    request_settings = dict(
        title = settings["title"],
        description = settings["description"],
        keywords = settings["keywords"],
        question = question.HTMLQuestion(settings["url"], settings["frame_height"]),
        max_assignments = max_assignments,
        reward = settings["reward"],
        approval_delay = timedelta(seconds = settings["auto_approval_delay"]),
        duration = timedelta(seconds = settings["assignment_duration"]),
        lifetime = timedelta(seconds = lifetime_seconds),
        qualifications = hit_quals
    )
    try:
        create_result = mtc.create_hit(**request_settings)[0]
    except MTurkRequestError as e:
        print("Error\n")
        pp(e.__dict__)
        sys.exit(1)
    hit = {
        "id": create_result.HITId,
        # hit_group_id = hit.HITGroupId,
        "type_id": create_result.HITTypeId
    }
    hit_modes[mode] = hit
    print("Successfully created HIT")
    ## write hit and HITTypeId into even-odd.json
    with open("hit_modes.json", 'w') as new_settings_file:
        json.dump(hit_modes, new_settings_file, indent=4, separators=(',', ': '))
    print("Wrote HIT ID and HIT Type ID to hit_modes.json")
    prints(
        "- The number of initial assignments is set to %s" % request_settings["max_assignments"],
        "- The initial HIT lifetime is set to %s" % humane_timedelta(request_settings["lifetime"]))
    prints(
        "",
        "Manage HIT: ",
        HOST_requester + "/mturk/manageHIT?HITId=" + hit["id"])
    prints(
        "",
        "View HIT: ",
        HOST_worker + "/mturk/preview?groupId=" + hit["type_id"],
        "")
    logger.write({'Action': 'Create', 'Data': settings_raw })
SANDBOX = True HOST = { True: "mechanicalturk.sandbox.amazonaws.com", False: "mechanicalturk.amazonaws.com" }[SANDBOX] NUM_ITERATIONS = 10 EXPERIMENT_URL = """https://sophiaray.github.io/quadmods/abstract.html?shapeCond=Rh,S&condition=label-order""" mtc = MTurkConnection(aws_access_key_id=AK, aws_secret_access_key=SK, host=HOST) quals = Qualifications() quals.add(PercentAssignmentsApprovedRequirement('GreaterThanOrEqualTo', 95)) quals.add(NumberHitsApprovedRequirement('GreaterThanOrEqualTo', 1)) quals.add(LocaleRequirement('EqualTo', 'US')) new_hit = mtc.create_hit( hit_type=None, question=ExternalQuestion(EXPERIMENT_URL, 600), lifetime=2 * 60 * 60, # Amount of time HIT will be available to accept unless 'max_assignments' are accepted before max_assignments=NUM_ITERATIONS, title='$3 for 15 min. | Concept learning | University of Louisville', description= 'Participate in a simple psychological experiment on concept learning. The complete duration should be approximately 15 minutes (reward is estimated according to $12/hr).', keywords='concepts, learning', reward=3.0, duration=45 *
'<p>KarmaNotes.org is a non-profit organization dedicated to free and open education. ' \ 'We need your help to identify keywords and definitions in college student lecture notes. ' \ 'Here is one example from an American History course:</p>' \ '<p><strong>Constitutional Amendment</strong> — The process whereby the US ' \ 'Constitution may be altered by a two-thirds vote of the Senate and House of ' \ 'Representatives or a vote by at least two-thirds of the states.</p>' \ '<p>In the notes below, please find keywords and definitions like the example above.</p>' \ '<p>Notes link: <strong><a href="http://{domain}{link}">' \ 'http://{domain}{link}</a></strong></p>' \ '<p>In these notes, please find 10 to 20 key words and definitions within these student notes. ' \ 'With your help, we will generate free and open flashcards and quizzes to help ' \ 'students study. Together we can open education, one lecture at a time.</p>' KEYWORDS_HIT_KEYWORDS = 'writing, summary, keywords' KEYWORDS_HIT_DURATION = 60 * 60 * 24 * 7 KEYWORDS_HIT_REWARD = 0.92 KEYWORDS_HIT_PERCENT_APPROVED_REQUIREMENT = PercentAssignmentsApprovedRequirement( comparator='GreaterThan', integer_value=95) KEYWORDS_HIT_QUALIFICATION = Qualifications( requirements=[KEYWORDS_HIT_PERCENT_APPROVED_REQUIREMENT]) KEYWORDS_HIT_KEYWORD_FIELDS = [ ('keyword01', 'Keyword 1'), ('keyword02', 'Keyword 2'), ('keyword03', 'Keyword 3'), ('keyword04', 'Keyword 4'), ('keyword05', 'Keyword 5'), ('keyword06', 'Keyword 6'), ('keyword07', 'Keyword 7'), ('keyword08', 'Keyword 8'), ('keyword09', 'Keyword 9'), ('keyword10', 'Keyword 10'), ('keyword11', 'Keyword 11'),
def generate_hits(data_sources, args):
    """Publish a sandbox HIT asking workers to rate image interestingness.

    Takes the first 50 image URLs from *data_sources*, records them with
    ``utils.write_file``, builds one dropdown rating question per image,
    and creates a single HIT on the MTurk sandbox.
    """
    from boto.mturk.connection import MTurkConnection
    from boto.mturk.question import QuestionContent, Question, QuestionForm, Overview, AnswerSpecification, SelectionAnswer, FormattedContent, FreeTextAnswer
    from boto.mturk.qualification import PercentAssignmentsApprovedRequirement, Qualifications

    connection = MTurkConnection(
        aws_access_key_id=amazon_config.ACCESS_ID,
        aws_secret_access_key=amazon_config.SECRET_KEY,
        host='mechanicalturk.sandbox.amazonaws.com')

    hit_title = 'Give your opinion of interestingness level about images'
    hit_description = (
        'Watch images and give us your opinion of interestingness level about the images'
    )
    hit_keywords = 'image, interestingness, interesting, rating, opinions'
    rating_choices = [
        ('Very boring', '-2'),
        ('Boring', '-1'),
        ('Neutral', '0'),
        ('Interesting', '1'),
        ('Very interesting, I would like to share it with my friends.', '2'),
    ]

    # Overview shown above the per-image questions.
    overview = Overview()
    overview.append_field(
        'Title',
        'Give your opinion about interestingness level on those images')

    # Record the batch of images, then build one question per image.
    subset = data_sources[0:50]
    utils.write_file(subset, args.o)

    form = QuestionForm()
    form.append(overview)
    for image_url in subset:
        content = QuestionContent()
        content.append_field('Title', 'How interesting the image to you?')
        content.append(
            FormattedContent('<img src="' + image_url + '" alt="image" />'))
        dropdown = SelectionAnswer(min=1, max=1, style='dropdown',
                                   selections=rating_choices, type='text',
                                   other=False)
        form.append(Question(identifier='interestingness',
                             content=content,
                             answer_spec=AnswerSpecification(dropdown),
                             is_required=True))

    # Only workers with > 95% approval may take the HIT.
    quals = Qualifications()
    quals.add(PercentAssignmentsApprovedRequirement(comparator="GreaterThan",
                                                    integer_value="95"))

    connection.create_hit(questions=form,
                          qualifications=quals,
                          max_assignments=10,
                          title=hit_title,
                          description=hit_description,
                          keywords=hit_keywords,
                          duration=60 * 30,
                          reward=0.3)
def create_hit(href, hit):
    """Create an MTurk HIT pointing at *href* from the *hit* record.

    Derives max_workers for MULTIPLE_URLS hits, assembles the worker
    qualifications from the record's threshold fields, connects to the
    sandbox or production host as requested, and returns a Response
    wrapping the created HIT.
    """
    if hit.type == 'MULTIPLE_URLS':
        # the max_workers was not set in this form
        hit.max_workers = sum(
            getattr(hit, 'size%d' % index) for index in range(1, 11))

    # (threshold value, requirement class, comparator) — a requirement is
    # added only when its threshold is positive; rejection/abandonment
    # rates are upper bounds, the rest are lower bounds.
    qualifications = Qualifications()
    threshold_rules = [
        (hit.hit_approval, NumberHitsApprovedRequirement, "GreaterThan"),
        (hit.hit_approval_rate, PercentAssignmentsApprovedRequirement, "GreaterThan"),
        (hit.accepted_hit_rate, PercentAssignmentsSubmittedRequirement, "GreaterThan"),
        (hit.returned_hit_rate, PercentAssignmentsReturnedRequirement, "GreaterThan"),
        (hit.abandoned_hit_rate, PercentAssignmentsAbandonedRequirement, "LessThan"),
        (hit.rejected_hit_rate, PercentAssignmentsRejectedRequirement, "LessThan"),
    ]
    for threshold, requirement_cls, comparator in threshold_rules:
        if threshold > 0:
            qualifications.add(requirement_cls(comparator, threshold, False))
    if hit.locale_qualification is not None and hit.locale_qualification != 'all':
        qualifications.add(
            LocaleRequirement("EqualTo", hit.locale_qualification, False))

    host_url = SANDBOX_HOST if hit.sandbox else HOST
    connection = MTurkConnection(aws_access_key_id=hit.aws_access_key,
                                 aws_secret_access_key=hit.aws_secret_key,
                                 host=host_url,
                                 is_secure=True)
    return Response(
        connection.create_hit(
            question=ExternalQuestion(href, hit.frame_height),
            lifetime=hit.lifetime,
            max_assignments=hit.max_workers,
            title=hit.title,
            description=hit.description,
            keywords=['turktime']  # TODO
            , reward=hit.reward,
            duration=hit.duration,
            approval_delay=hit.approval_delay,
            qualifications=qualifications,
            response_groups=['Minimal', 'HITDetail', 'HITQuestion',
                             'HITAssignmentSummary']))