Example #1
import json
import os
import time

import requests


def callback(ch, method, properties, body):
    start_time = time.time()
    payload = json.loads(body.decode())
    print(payload)
    data = payload["data"]

    sourceCodeBasePath = "tmp/src"
    extractTarGz(tarGzBase64=data["sourceCodeBase64"],
                 basePath=sourceCodeBasePath)

    # start docker
    grade = Grader(tmpPath="tmp",
                   entryPoint=data["entry"],
                   testcases=data["testcases"])
    result = grade.grade()
    result['user'] = {
        'projectId': data["projectId"],
        'userId': data["userId"],
        'courseId': data["courseId"],
        'activityId': data["activityId"]
    }
    headers = {'Content-Type': "application/json"}

    requests.post(f'{os.getenv("BRIDGE_SERVICE_URL")}/callback/',
                  data=json.dumps(result),
                  headers=headers)
    elapsed = time.time() - start_time
    with open("execution_time.log", "a+") as f:
        f.write(f"{elapsed}\n")
    print("finished processing message")
    ch.basic_ack(delivery_tag=method.delivery_tag)
Example #2
class GraderThread(GraderSkeleton, threading.Thread):
    def __init__(self,
                 shared_counter,
                 test_config,
                 time_counter=SharedCounter(val_type='d'),
                 **kwargs):
        # forward keyword arguments (e.g. loop, spawn_interval) to the base class
        super(GraderThread, self).__init__(shared_counter, test_config,
                                           time_counter, **kwargs)
        threading.Thread.__init__(self)

        # initialize grader
        self.grader = Grader()

    def init(self):
        self.grader.init(self.test_config)

    def grade(self):
        # calculate result
        success_count, success_time = self.grader.test()
        if success_count > 0:
            self.shared_counter.increment()
            self.time_counter.increment(success_time)

    def run(self):
        while self.loop > 0:
            self.grade()
            self.loop -= 1

            # only sleep when we need to do next grade
            if self.loop > 0:
                sleep(self.spawn_interval)
Example #3
def main() -> None:
    tests = [
        TestGenSentences,
        TestDetokenize,
        TestSarcasticCaser,
    ]
    grader = Grader(tests)
    grader.print_results()
Example #4
    def __init__(self, participant_id):
        self.participant_id = participant_id
        self.condition = None

        # Data logged from iFEED
        self.learning_task_data = dict()
        self.design_synthesis_task_data = dict()
        self.feature_synthesis_task_data = dict()

        # Concept map data
        self.cmap_prior_data = dict()
        self.cmap_learning_data = dict()
        self.cmap_learning_data_extended = dict()

        # Problem answers
        self.feature_classification_answer = []
        self.feature_classification_confidence = []
        self.feature_comparison_answer = []
        self.feature_comparison_confidence = []
        self.design_classification_answer = []
        self.design_classification_confidence = []
        self.design_comparison_answer = []
        self.design_comparison_confidence = []

        # Feature preference questions
        self.feature_preference_data = dict()

        # Self-assessment of learning
        self.learning_self_assessment_data = []

        # Demographic info
        self.demographic_data = dict()
        self.prior_experience_data = dict()

        # Graded score and answers
        self.grader = Grader()
        self.feature_classification_score = None
        self.feature_classification_graded_answers = []
        self.feature_comparison_score = None
        self.feature_comparison_graded_answers = []
        self.design_classification_score = None
        self.design_classification_graded_answers = []
        self.design_comparison_score = None
        self.design_comparison_graded_answers = []

        # Transcript data
        self.transcript_problem_solving = None
        self.transcript_survey = None

        # IGD and distance to utopia
        self.feature_synthesis_dist2UP = dict()
        self.design_IGD = dict()
        self.design_num_designs_to_shortest_dist = -1

        self.design_HV = None
        self.design_entropy = None
Example #5
def main() -> None:
    tests = [
        TestSampler,
        TestBigramSampler,
        TestTrigramSampler,
        TestSequenceProbabilityBigram,
        TestSequenceProbabilityTrigram,
    ]
    grader = Grader(tests)
    grader.print_results()
Example #6
    def __init__(self,
                 shared_counter,
                 test_config,
                 time_counter=SharedCounter(val_type='d'),
                 **kwargs):
        # forward keyword arguments (e.g. loop, spawn_interval) to the base class
        super(GraderThread, self).__init__(shared_counter, test_config,
                                           time_counter, **kwargs)
        threading.Thread.__init__(self)

        # initialize grader
        self.grader = Grader()
Example #7
def main() -> None:
    tests = [
        TestGenSentences,
        TestNGrams,
        TestCounts,
        TestFrequencyDistributions,
        TestProbabilities,
        TestComparison,
    ]
    grader = Grader(tests)
    grader.print_results()
Example #8
def main() -> None:
    tests = [
        TestScoringMetrics,
        TestFeatureExtractor,
        TestInstanceCounter,
        TestNaiveBayesSegmentation,
        TestNaiveBayesSentiment,
        TestPerformanceSegmentation,
        TestPerformanceSentiment,
        TestTunedSegmentation,
    ]
    grader = Grader(tests)
    grader.print_results()
Example #9
File: tests.py Project: CIS192/grader
def test():
    """Run the sample tests as a grader problem."""
    from grader import Problem, Grader
    problem1 = Problem(TestSequenceFunctions, [
        ('test_shuffle', 4),
        ('test_choice', 2),
        ('test_sample', 4),
        ('test_sleep', 5),
        ('test_no_docstring', 5),
        ('test_timeout', 5),
        ], timeout=3)
    grader = Grader([problem1])
    grader.print_results()
Example #10
def main() -> None:
    tests = [
        TestMostFrequentTagTagger,
        TestUnigramTagger,
        TestInstanceCounterUnsmoothed,
        TestInstanceCounterSmoothed,
        TestSentenceCounterSpeed,
        TestBigramSequenceProbability,
        TestGreedyBigramTagger,
        TestViterbiBigramTagger,
        TestDeterminism,
    ]
    grader = Grader(tests, timeout=12)
    grader.print_results()
Example #11
    def __init__(self, jsonFilesRootPath, participant_id):
        self.jsonFilesRootPath = jsonFilesRootPath
        self.participant_id = participant_id

        self.condition = None

        # Data logged from iFEED
        self.learning_task_data = dict()
        self.design_synthesis_task_data = dict()
        self.feature_synthesis_task_data = dict()

        # Concept map data
        self.cmap_prior_data = dict()
        self.cmap_learning_data = dict()

        # Problem answers
        self.feature_classification_answer = []
        self.feature_classification_confidence = []
        self.feature_comparison_answer = []
        self.feature_comparison_confidence = []
        self.design_classification_answer = []
        self.design_classification_confidence = []
        self.design_comparison_answer = []
        self.design_comparison_confidence = []

        # Feature preference questions
        self.feature_preference_data = dict()

        # Self-assessment of learning
        self.learning_self_assessment_data = dict()

        # Demographic info
        self.age = None
        self.gender = None
        self.major = None
        self.prior_experience_data = dict()

        # Read JSON files
        self.readJSONFiles()

        # Graded score and answers
        self.grader = Grader()
        self.feature_classification_score = None
        self.feature_classification_graded_answers = []
        self.feature_comparison_score = None
        self.feature_comparison_graded_answers = []
        self.design_classification_score = None
        self.design_classification_graded_answers = []
        self.design_comparison_score = None
        self.design_comparison_graded_answers = []
Example #12
def grade():
    # start a session to grade a project!
    browser = launch_browser(headless=args.headless, timeout=8)
    headless_grader = Grader(browser)
    try:
        headless_grader.login_refresh_grade()

    except Exception:
        err_msg = '***** FAILED {} *****'.format(datetime.now())
        logger.error(err_msg, exc_info=True)
        with open('logs.txt', 'a') as f:
            f.write('******************************************************\n')
            f.write(err_msg + '\n')
            f.write(traceback.format_exc())
    finally:
        # release the browser whether or not grading succeeded
        headless_grader.browser.quit()
Example #13
def main(image_path):
    # get the final worksheet from the image
    ext = Extractor(image_path, False)
    final = ext.final

    # get the form code by checking the image's QR code
    decoded_qr_code = reader(final)

    # extract the cells and student's responses
    cells = Cells(final)

    # grade the worksheet by using a CNN to OCR the student's responses
    grader = Grader(decoded_qr_code)
    grader.grade(cells.student_responses)
    worksheet = grader.display(final, cells.sorted_contours)
    Helpers.save_image(f'{Helpers.IMAGE_DIRECTORY}/graded.png', worksheet)
Example #14
    def update(self) -> 'FormReturnStatus':
        """Update the form screen.

        This is used to update the screen every 'tick' when this
        function is called.
        """
        try:
            if self.settings_form is not None:
                self.settings_form = self.settings_form.update()
                self.refresh_settings()
                self.update_after_settings_finish = True
            elif self.update_after_settings_finish:
                # Only update the grader after the settings form has closed,
                # since it reads data files to get its data and we want to
                # limit file reading as much as possible. That's why this isn't
                # in refresh_settings.
                self.grader = Grader(Settings.active_files)
                self.display("-- new questions loaded --")
                self.update_stats()
                # Set that the user is reviewing, so the next question is
                # not automatically wrong when they hit the next button
                # directly after changing the settings (updating the grader).
                # This lets them view the next question as normal.
                self.is_reviewing = True
                self.update_after_settings_finish = False

            if not self.scheduled_actions.empty():
                # Execute the scheduled function.
                self.scheduled_actions.get()()
            self.form.update_idletasks()
            self.form.update()
        except TclError:  # The form has been destroyed (i.e. on restart)
            return self.return_status
        return FormReturnStatus.RUNNING
Example #15
    def __init__(self):
        self.settings = Settings()
        self.logger = Logger(self.settings.logfile)
        self.reporter = Reporter(self.settings)
        self.setup = Setup(self.settings, self.logger)
        self.grader = Grader(self.settings, self.logger, self.setup)
        self.analyser = Analyser(self.settings, self.reporter, self.logger,
                                 self.setup, self.grader)
Example #16
    def __init__(self):
        """TODO: INSERT DOCSTRING."""
        # Set the exception callback to handle any of the exceptions
        # that are raised.
        tk.Tk.report_callback_exception = exception.unhandled_error
        # Set what the program will do when closed (stop / restart).
        self.return_status: FormReturnStatus = FormReturnStatus.STOP

        # Load all of the 'quiz' elements.
        self.grader: Grader = Grader(Settings.active_files)

        # Set form values from the settings.
        self.font_style: str = Settings.typeface
        self.font_size: int = Settings.font_size
        # The segment of the data objects to display.
        self.display_item: int = Settings.display_item
        self.theme: Theme = Theme()

        self.settings_form: SettingsForm = None
        # Tracks if the settings form has closed. If so, perform single-shot
        # updates, such as updating the Grader; this limits the number of
        # unnecessary file reads.
        self.update_after_settings_finish: bool = False

        # Create the main form and configure it.
        self.form = tk.Tk()
        self.form.geometry(INIT_SCREEN_SIZE)
        self.form.minsize(MIN_SCREEN_SIZE[0], MIN_SCREEN_SIZE[1])
        self.form.title(SCREEN_TITLE)

        # Create the main frame to hold all the elements.
        self.frame: Frame = Frame(self.form)
        # Make the main frame fit the size of the window.
        self.frame.pack(fill=BOTH, expand=1)
        self.theme.add_to_group(self.frame, ThemeGroup.MAIN_GROUP)

        # Tracks if the user is in the review state (i.e. looking at the
        # answer)
        self.is_reviewing: bool = True
        # Tracks if the user is allowed to move on. This is activated
        # for a short time when the user gets a question wrong. This
        # allows them time to review the correct answer, and prevents
        # them from accidentally skipping it.
        self.progress_blocked: bool = False

        # A queue that other threads can store function calls to be
        # executed by the main thread on a form update.
        self.scheduled_actions: Queue = Queue()

        self.create_widgets()  # Populate the form.
        # Set the 'Enter' key to activate the submit button so the user
        # can submit using the keyboard.
        self.form.bind(RETURN_KEY, self.submit_callback)
        self.theme.set_theme_color()  # Theme the form.
        self.display(INITAL_MESSAGE)  # Show the user the welcome message.
Example #17
def grade(name=None):
    grader = Grader()
    if not name:
        count = grader.calc()
        print("jobs done, scores of %d plugins updated" % count)
        return

    try:
        plugin_id = int(name)
    except ValueError:
        plugin_id = 0

    table = PluginTable()
    if plugin_id:
        plugin = table.findById(plugin_id)
    else:
        plugin = table.findByName(name)

    if plugin:
        grader.calc(data=plugin)
    else:
        print("Sorry, we cannot find plugin %s" % name)
Example #18
File: main.py Project: Liechti/exam-ai
def main():
    questions = parse.parse_test("exams/102.txt")

    # Example: get all questions from section 3.
    section_three = get_section(questions, 3)

    # Uncomment to print all questions.
    '''
    for question in questions:
        print('--------------')
        for k, v in question.items():
            print(k, ':', v)
    '''
    
    # Ngram search on section 1 & 2
    
    driverinit()
    
    section_one = get_section(questions, 1)
    #section_tmp = [question for question in questions if 1 <= question['number'] <= 15 ]
    answers = [ngram_analysis(question) for question in section_one]
    
    section_two = get_section(questions, 2)
    #section_tmp = [question for question in questions if 28 <= question['number'] <= 28 ]
    answers.extend([sec2solver(question) for question in section_two])
    driverclose()
    
    # Example Grader
    
    #answers = ['B']*56
    answers.extend(['B']*26)
    nums = range(1, 57)

    grader = Grader("solutions/102ans.txt")
    
    # Uncomment to see example grader.
    grader.grade_questions(nums, answers)
Example #19
    def run_grader_cb(self, feedback):
        g = Grader(self.server)
        rospy.sleep(1.0)
        self.og.simple_obs()
        rospy.sleep(0.5)
        Ttrans = tf.transformations.translation_matrix((0.5, -0.2, 0.3))
        Rtrans = tf.transformations.rotation_matrix(1.57, (0, 1, 0))
        T = numpy.dot(Ttrans, Rtrans)
        g.goto_pose("Simple Obstacle", T, 10)
        self.og.complex_obs()
        rospy.sleep(0.5)
        Ttrans = tf.transformations.translation_matrix((0.4, 0.5, 0.3))
        Rtrans = tf.transformations.rotation_matrix(1.57, (0, 1, 0))
        T = numpy.dot(Ttrans, Rtrans)
        g.goto_pose("Medium Obstacle", T, 30)
        self.og.super_obs()
        rospy.sleep(0.5)
        Ttrans = tf.transformations.translation_matrix((0.5, 0.0, 0.5))
        Rtrans = tf.transformations.rotation_matrix(1.57, (0, 1, 0))
        T = numpy.dot(Ttrans, Rtrans)
        g.goto_pose("Hard Object", T, 120)
Example #20
transactions = pd.read_csv(os.path.join(DATA_FOLDER, 'sales_train.csv.gz'))
items = pd.read_csv(os.path.join(DATA_FOLDER, 'items.csv'))
item_categories = pd.read_csv(os.path.join(DATA_FOLDER, 'item_categories.csv'))
shops = pd.read_csv(os.path.join(DATA_FOLDER, 'shops.csv'))

# The dataset we are going to use is taken from the competition that serves as the final project for this course. You can find the complete data description at the [competition web page](https://www.kaggle.com/c/competitive-data-science-final-project/data). To join the competition, use [this link](https://www.kaggle.com/t/1ea93815dca248e99221df42ebde3540).

# ## Grading

# We will create a grader instance below and use it to collect your answers. When the function `submit_tag` is called, the grader stores your answer *locally*. The answers will *not* be submitted to the platform immediately, so you can call `submit_tag` as many times as you need.
#
# When you are ready to push your answers to the platform, fill in your credentials and run the `submit` function in the <a href="#Authorization-&-Submission">last paragraph</a> of the assignment. A brief usage sketch follows the cell below.

# In[5]:

grader = Grader()
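
# A minimal usage sketch (assuming the course grader's `submit_tag(tag, value)`
# and `submit(email, token)` methods; the tag name here is made up for
# illustration, so the lines are left commented out):
#
#     grader.submit_tag('some_tag', 42)          # stored locally; can be re-run freely
#     grader.submit('you@example.com', 'token')  # pushes all stored answers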

# # Task

# Let's start with a simple task.
#
# <ol start="0">
#   <li><b>Print the shape of the loaded dataframes and use the [`df.head`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html) function to print several rows. Examine the features you are given.</b> (A shape-printing sketch follows the cell below.)</li>
# </ol>

# In[6]:

shops.head()
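
# A minimal sketch for the shape-printing part of task 0 (the four dataframes
# are the ones loaded at the top of this example):

for name, df in [('transactions', transactions), ('items', items),
                 ('item_categories', item_categories), ('shops', shops)]:
    print(name, df.shape)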

# In[7]:
Example #21
def main(args):
    data = read(HOUGHED + args.filename)
    grader = Grader()
    new_data = grader.colour_data(data)
    write(new_data, HOUGHED + args.filename)
    print("DONE")
Example #22
from inspect import currentframe

def get_linenumber():  # name assumed; the def line was missing from this snippet
    cf = currentframe()
    return cf.f_back.f_lineno

path="C:/Users/anandrathi/Documents/DataScieince/Coursera/NLP/natural-language-processing-master/week1"
path="C:/temp/DataScience/TextParseNLP/natural-language-processing-master/week1/"
os.chdir(path)

import sys
sys.path.append("..")
from common.download_utils import download_week1_resources

download_week1_resources()


from grader import Grader
grader = Grader()

import nltk
nltk.set_proxy('http://*****:*****@proxyserver.health.wa.gov.au:8181',)
nltk.download('stopwords')
from nltk.corpus import stopwords

from ast import literal_eval
import pandas as pd
import numpy as np

def read_data(filename):
    data = pd.read_csv(filename,  sep="\t")
    data['tags'] = data['tags'].apply(literal_eval)
    return data
Example #23
def GradeSubmissions(config, submissions):
    grader = Grader()
    for submission in submissions.values():
        grader.Grade(submission)
Example #24
import sys

sys.path.append("..")
from common.download_utils import download_week1_resources

download_week1_resources()

# ### Grading
# We will create a grader instance below and use it to collect your answers. Note that these outputs will be stored locally inside the grader and will be uploaded to the platform only after running the submit function in the last part of this assignment. If you want to make a partial submission, you can run that cell any time you want.

# In[4]:

from grader import Grader

# In[5]:

grader = Grader()

# ### Text preprocessing

# For this and most of the following assignments you will need to use a list of stop words. It can be downloaded from *nltk*:

# In[6]:

import nltk

nltk.download('stopwords')
from nltk.corpus import stopwords

# In this task you will deal with a dataset of post titles from StackOverflow. You are provided a split into 3 sets: *train*, *validation* and *test*. All corpora (except *test*) contain titles of the posts and the corresponding tags (100 tags are available). The *test* set is provided for Coursera's grading and doesn't contain answers. Load the corpora using *pandas* and look at the data (a loading sketch follows below):

# In[7]:
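
# A minimal loading sketch (assuming the `read_data` helper and the data/ file
# layout shown in Example #25; the validation/test file names are assumptions):

train = read_data('data/train.tsv')
validation = read_data('data/validation.tsv')
test = pd.read_csv('data/test.tsv', sep='\t')
train.head()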
Example #25
try:
    import google.colab  # noqa: F401
    IN_COLAB = True
except ImportError:
    IN_COLAB = False

if IN_COLAB:
    ! wget https://raw.githubusercontent.com/hse-aml/natural-language-processing/master/setup_google_colab.py -O setup_google_colab.py
    import setup_google_colab
    setup_google_colab.setup_week1() 
    
import sys
sys.path.append("..")
from common.download_utils import download_week1_resources

download_week1_resources()

from grader import Grader

grader = Grader()

import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords

from ast import literal_eval
import pandas as pd
import numpy as np

def read_data(filename):
    data = pd.read_csv(filename, sep='\t')
    data['tags'] = data['tags'].apply(literal_eval)
    return data
    
train = read_data('data/train.tsv')
Example #26
File: views.py Project: kartikshah1/Test
    def submit_answer(self, request, pk=None):
        """Submit an answer to a question"""
        question = get_object_or_404(Question_Master, pk=pk)
        self.check_object_permissions(request, question)

        submission = User_Submissions.objects.get(
            user=request.user,
            question=question
        )

        if submission.status == submission.CORRECT or submission.status == submission.WRONG:
            error = {
                'status': False,
                'detail': 'Question already answered'
            }
            return Response(error, status.HTTP_400_BAD_REQUEST)

        if submission.is_answer_shown():
            error = {
                'status': False,
                'detail': 'Question already attempted',
            }
            return Response(error, status.HTTP_400_BAD_REQUEST)

        if submission.attempts >= question.attempts:
            error = {
                'status': False,
                'detail': 'Exceeded the maximum number of attempts'
            }
            return Response(error, status.HTTP_400_BAD_REQUEST)

        submission.attempts += 1
        attempts_remaining = question.attempts - submission.attempts

        serializer = serializers.AnswerSubmitSerializer(data=request.DATA)
        print(serializer)
        if serializer.is_valid():
            submission.status = User_Submissions.ATTEMPTED
            submission.answer = serializer.data['answer']

            data = {
                'status': submission.status,
                'marks': submission.marks,
                'attempts_remaining': attempts_remaining,
                'explaination': submission.explaination
            }

            grader = Grader(submission=submission, question=question)
            if grader.grade():
                submission = grader.submission
                data['status'] = submission.status
                data['marks'] = submission.marks
                data['explaination'] = submission.explaination
                if attempts_remaining == 0 or submission.status == User_Submissions.CORRECT:
                    if grader.the_question is None:
                        the_question = Question.objects.get_subclass(
                            pk=submission.question.pk)
                        data['answer'] = \
                            the_question.get_answer()
                    else:
                        data['answer'] = \
                            grader.the_question.get_answer()
                serializer = serializers.FrontEndSubmissionSerializer(data)
            else:
                serializer = serializers.FrontEndSubmissionSerializer(data)

            # return the result of grading
            return Response(serializer.data)
        else:
            submission.save()
            content = serializer.errors
            return Response(content, status.HTTP_400_BAD_REQUEST)
Example #27
    def run(test_config_file_name):
        test_config = YamlConfigFileHandler(test_config_file_name)

        if test_session == 1:  # single session grade
            # init_log_file()

            grader = Grader()
            grader.init(test_config)
            grader.test()
        elif test_session > 1:  # multi session grade
            # calculate thread spawn interval
            spawn_interval = test_length / (test_session * 1.0)

            # determine grader class
            use_process = False
            handler_count = test_session
            session_per_handler = 1
            Handler_Class = GraderThread

            # use process to speed up grade
            if test_session > 512:
                use_process = True
                handler_count = multiprocessing.cpu_count()
                session_per_handler = test_session // handler_count  # whole sessions per process
                Handler_Class = GraderProcess

            # count the number of spawned sessions
            session_count = 0

            # thread safe success counter
            success_count = SharedCounter()
            success_time_count = SharedCounter(val_type='d')

            # process time counter
            process_time = time.time()

            # thread group
            threads = []

            # if not use_process and test_session <= 100:
            #     init_log_file()

            report_logger.info(
                "Testing {0} sessions in {1} seconds, interval: {2}, using class {3}"
                .format(test_session, test_length, spawn_interval,
                        Handler_Class.__name__))
            report_logger.info("Warming up ...")

            warm_up_time = time.time()
            # Spawn threads
            while session_count < handler_count:
                grader_handler = Handler_Class(success_count,
                                               test_config,
                                               success_time_count,
                                               loop=session_per_handler,
                                               spawn_interval=spawn_interval *
                                               handler_count)
                grader_handler.init()

                threads.append(grader_handler)
                session_count += 1

            report_logger.info(
                "Warm up process finished in {0} seconds".format(time.time() -
                                                                 warm_up_time))

            launch_time = time.time()
            # Start threads
            for grader_handler in threads:
                grader_handler.start()

                # Wait for spawn interval
                sleep(spawn_interval)

            report_logger.info("{0} sessions started in {1}".format(
                int(session_count * session_per_handler),
                time.time() - launch_time))

            # Wait for all threads to finish
            for grader_handler in threads:
                grader_handler.join()

            questions_count = success_count.value() * len(
                test_config.get_config("questions"))
            report_logger.info(
                "Result: {0} / {1} passed. Total time: {2}\nSuccess time: {3} Passed: {4} Success avg: {5}"
                .format(success_count.value(),
                        int(session_count * session_per_handler),
                        time.time() - process_time, success_time_count.value(),
                        questions_count,
                        success_time_count.value() / questions_count))
Example #28
class Subject():
    def __init__(self, participant_id):
        self.participant_id = participant_id
        self.condition = None

        # Data logged from iFEED
        self.learning_task_data = dict()
        self.design_synthesis_task_data = dict()
        self.feature_synthesis_task_data = dict()

        # Concept map data
        self.cmap_prior_data = dict()
        self.cmap_learning_data = dict()
        self.cmap_learning_data_extended = dict()

        # Problem answers
        self.feature_classification_answer = []
        self.feature_classification_confidence = []
        self.feature_comparison_answer = []
        self.feature_comparison_confidence = []
        self.design_classification_answer = []
        self.design_classification_confidence = []
        self.design_comparison_answer = []
        self.design_comparison_confidence = []

        # Feature preference questions
        self.feature_preference_data = dict()

        # Self-assessment of learning
        self.learning_self_assessment_data = []

        # Demographic info
        self.demographic_data = dict()
        self.prior_experience_data = dict()

        # Graded score and answers
        self.grader = Grader()
        self.feature_classification_score = None
        self.feature_classification_graded_answers = []
        self.feature_comparison_score = None
        self.feature_comparison_graded_answers = []
        self.design_classification_score = None
        self.design_classification_graded_answers = []
        self.design_comparison_score = None
        self.design_comparison_graded_answers = []

        # Transcript data
        self.transcript_problem_solving = None
        self.transcript_survey = None

        # IGD and distance to utopia
        self.feature_synthesis_dist2UP = dict()
        self.design_IGD = dict()
        self.design_num_designs_to_shortest_dist = -1

        self.design_HV = None
        self.design_entropy = None

    def gradeAnswers(self, confidenceThreshold=None):
        self.feature_classification_score, self.feature_classification_graded_answers = self.grader.gradeAnswers(
            "feature",
            "classification",
            self.feature_classification_answer,
            self.feature_classification_confidence,
            confidenceThreshold=confidenceThreshold)
        self.feature_comparison_score, self.feature_comparison_graded_answers = self.grader.gradeAnswers(
            "feature",
            "comparison",
            self.feature_comparison_answer,
            self.feature_comparison_confidence,
            confidenceThreshold=confidenceThreshold)
        self.design_classification_score, self.design_classification_graded_answers = self.grader.gradeAnswers(
            "design",
            "classification",
            self.design_classification_answer,
            self.design_classification_confidence,
            confidenceThreshold=confidenceThreshold)
        self.design_comparison_score, self.design_comparison_graded_answers = self.grader.gradeAnswers(
            "design",
            "comparison",
            self.design_comparison_answer,
            self.design_comparison_confidence,
            confidenceThreshold=confidenceThreshold)

    def getMeanConfidence(self,
                          problemTopic=None,
                          problemType=None,
                          countOnlyCorrectAnswers=False,
                          countOnlyWrongAnswers=False):
        if self.feature_classification_score is None or self.feature_comparison_score is None or self.design_classification_score is None or self.design_comparison_score is None:
            raise ValueError()

        gradedAnswers = []
        confidences = []

        if problemTopic is None and problemType is None:
            gradedAnswers = self.feature_classification_graded_answers + self.feature_comparison_graded_answers + self.design_classification_graded_answers + self.design_comparison_graded_answers
            confidences = self.feature_classification_confidence + self.feature_comparison_confidence + self.design_classification_confidence + self.design_comparison_confidence

        targetGrade = None
        if countOnlyCorrectAnswers:
            targetGrade = 1
        elif countOnlyWrongAnswers:
            targetGrade = 0

        if targetGrade is not None:
            tempConfidences = []
            for i, c in enumerate(gradedAnswers):
                if gradedAnswers[i] == targetGrade:
                    tempConfidences.append(confidences[i])
            confidences = tempConfidences

        return np.mean(confidences)

    def printloggedDataSummary(self, task=None, loggedData=None):
        if task is None and loggedData is None:
            raise ValueError()

        if task is not None:
            if task == "learning_task":
                data = self.learning_task_data
            elif task == "feature_synthesis_task":
                data = self.feature_synthesis_task_data
            elif task == "design_synthesis_task":
                data = self.design_synthesis_task_data

        elif loggedData is not None:
            data = loggedData

        out = [
            "Subject: {0} - condition: {1}".format(self.participant_id,
                                                   self.condition)
        ]
        for key in data.keys():
            if key in [
                    "participantID", "treatmentCondition", "stage", "duration",
                    "paramsInfo"
            ]:
                continue

            val = data[key]
            if key == "designs_evaluated":
                val = str(len(data[key]))

            elif key == "features_found":
                val = str(len(data[key]))

            out.append("{0}: {1}".format(key, val))
        print("\n".join(out))

    def getAggregateScore(self, combineFandD=True):
        FScore = (self.feature_classification_score +
                  self.feature_comparison_score) / 2
        DScore = (self.design_classification_score +
                  self.design_comparison_score) / 2
        if combineFandD:
            total = (FScore + DScore) / 2
            return round(total, 2)
        else:
            FScore = round(FScore, 2)
            DScore = round(DScore, 2)
            return FScore, DScore

    def printAggregateScore(self, combineFandD=True):
        print("Subject: {0} - condition: {1}".format(self.participant_id,
                                                     self.condition))
        if combineFandD:
            total = self.getAggregateScore(combineFandD=combineFandD)
            print("Total score: {0}".format(total))
        else:
            FScore, DScore = self.getAggregateScore(combineFandD=combineFandD)
            print("Feature: {0}, Design: {1}".format(FScore, DScore))

    def printScoreSummary(self):
        print("Subject: {0} - condition: {1}".format(self.participant_id,
                                                     self.condition))
        print("Fcl: {0}, Fpwc: {1}, Dcl: {2}, Dpwc: {3}".format(
            self.feature_classification_score, self.feature_comparison_score,
            self.design_classification_score, self.design_comparison_score))

    def gradePositiveFeatures(self):
        return self.grader.gradePositiveOrNegativeFeatures(
            self.feature_classification_graded_answers,
            self.feature_comparison_graded_answers,
            positive=True)

    def gradeNegativeFeatures(self):
        return self.grader.gradePositiveOrNegativeFeatures(
            self.feature_classification_graded_answers,
            self.feature_comparison_graded_answers,
            positive=False)

    def gradeHighLevelFeatures(self):
        return self.grader.gradeHighVsLowLevelFeatures(
            self.feature_classification_graded_answers,
            self.feature_comparison_graded_answers,
            highLevel=True)

    def gradeLowLevelFeatures(self):
        return self.grader.gradeHighVsLowLevelFeatures(
            self.feature_classification_graded_answers,
            self.feature_comparison_graded_answers,
            highLevel=False)

    def getDist2Utopia(self):
        if len(self.feature_synthesis_task_data
               ) != 0 and 'features_found' in self.feature_synthesis_task_data:
            features = self.feature_synthesis_task_data['features_found']
            self.feature_synthesis_dist2UP = 1
            for count, feature in enumerate(features):
                x = feature['metrics'][2]
                y = feature['metrics'][3]
                dist = math.sqrt((1.0 - x)**2 + (1.0 - y)**2)
                if dist < self.feature_synthesis_dist2UP:
                    self.feature_synthesis_dist2UP = dist

        return self.feature_synthesis_dist2UP

    def computeDesignEntropy(self):
        def rearrangeDesignInputs(inputs):
            inputAppearance = []

            for designIndex, designInputString in enumerate(inputs):
                if len(inputAppearance) == 0:
                    inputAppearance = [[]
                                       for k in range(len(designInputString))]

                for inputIndex, val in enumerate(designInputString):
                    if val == "1":
                        inputAppearance[inputIndex].append(designIndex)
            return inputAppearance

        def getEntropy(inputAppearances, base=10):
            N = len(inputAppearances)

            sig = 0
            for i, appearances in enumerate(inputAppearances):
                if len(appearances) == 0:
                    continue
                else:
                    p = len(appearances) / N
                    sig += p * math.log(p, base)
            H = -1 / math.log(N, base) * sig
            return H

        inputs = []
        designs = self.design_synthesis_task_data['designs_evaluated']
        for d in designs:
            inputs.append(d['inputs'])
        inputAppearances = rearrangeDesignInputs(inputs)
        entropy = getEntropy(inputAppearances)
        self.design_entropy = entropy
Example #29
all_data['item_target_enc'] = all_data.groupby('item_id')['target'].transform(
    'mean')

# Fill NaNs
all_data['item_target_enc'].fillna(0.3343, inplace=True)

# Print correlation
encoded_feature = all_data['item_target_enc'].values
print(np.corrcoef(all_data['target'].values, encoded_feature)[0][1])

# See the printed value? It is the correlation coefficient between the target variable and your new encoded feature. You need to **compute the correlation coefficient** between the encodings that you will implement and **submit those to Coursera**.

# In[8]:

grader = Grader()

# # 1. KFold scheme

# Explained starting at 41 sec of [Regularization video](https://www.coursera.org/learn/competitive-data-science/lecture/LGYQ2/regularization).

# **Now it's your turn to write the code!**
#
# You may use 'Regularization' video as a reference for all further tasks.
#
# First, implement the KFold scheme with five folds. Use `KFold(5)` from `sklearn.model_selection`.
#
# 1. Split your data into 5 folds with `sklearn.model_selection.KFold` with the `shuffle=False` argument.
# 2. Iterate through the folds: use all but the current fold to calculate the mean target for each level of `item_id`, and fill the current fold.
#
#     *  See **Method 1** from the example implementation. In particular, learn what the `map` and `pd.Series.map` functions do; they are handy in many situations. (A sketch follows this list.)
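
# A minimal sketch of the scheme described above (assuming `all_data` has the
# `item_id` and `target` columns used earlier in this example):

from sklearn.model_selection import KFold

test_df = all_data.copy()
test_df['item_id_target_mean'] = np.nan
for tr_ind, val_ind in KFold(5, shuffle=False).split(test_df):
    X_tr, X_val = test_df.iloc[tr_ind], test_df.iloc[val_ind]
    # per-item mean target computed on the training folds only
    means = X_val['item_id'].map(X_tr.groupby('item_id')['target'].mean())
    test_df.iloc[val_ind, test_df.columns.get_loc('item_id_target_mean')] = means.values
test_df['item_id_target_mean'].fillna(0.3343, inplace=True)  # the global fallback used above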
Example #30
import numpy as np
from numpy.linalg import slogdet, det, solve
import matplotlib.pyplot as plt
import time
from sklearn.datasets import load_digits
from grader import Grader
get_ipython().magic('matplotlib inline')


# ### Grading
# We will create a grader instance below and use it to collect your answers. Note that these outputs will be stored locally inside the grader and will be uploaded to the platform only after running the submit function in the last part of this assignment. If you want to make a partial submission, you can run that cell any time you want.

# In[ ]:

grader = Grader()


# ## Implementing EM for GMM

# For debugging we will use samples from a Gaussian mixture model with unknown means, variances and priors. We also added initial values of the parameters for grading purposes.

# In[10]:

samples = np.load('samples.npz')
X = samples['data']
pi0 = samples['pi0']
mu0 = samples['mu0']
sigma0 = samples['sigma0']
plt.scatter(X[:, 0], X[:, 1], c='grey', s=30)
plt.axis('equal')
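
# A minimal sketch of the E-step this section builds toward (a naive version
# for illustration, assuming the parameter shapes loaded above; the
# assignment's exact function signature may differ):

from scipy.stats import multivariate_normal

def e_step(X, pi, mu, sigma):
    N, C = X.shape[0], pi.shape[0]
    gamma = np.zeros((N, C))
    for c in range(C):
        # unnormalized responsibility of component c for every point
        gamma[:, c] = pi[c] * multivariate_normal.pdf(X, mean=mu[c], cov=sigma[c])
    gamma /= gamma.sum(axis=1, keepdims=True)  # normalize over components
    return gamma

gamma0 = e_step(X, pi0, mu0, sigma0)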
Example #31
class Subject():
    def __init__(self, jsonFilesRootPath, participant_id):
        self.jsonFilesRootPath = jsonFilesRootPath
        self.participant_id = participant_id

        self.condition = None

        # Data logged from iFEED
        self.learning_task_data = dict()
        self.design_synthesis_task_data = dict()
        self.feature_synthesis_task_data = dict()

        # Concept map data
        self.cmap_prior_data = dict()
        self.cmap_learning_data = dict()

        # Problem answers
        self.feature_classification_answer = []
        self.feature_classification_confidence = []
        self.feature_comparison_answer = []
        self.feature_comparison_confidence = []
        self.design_classification_answer = []
        self.design_classification_confidence = []
        self.design_comparison_answer = []
        self.design_comparison_confidence = []

        # Feature preference questions
        self.feature_preference_data = dict()

        # Self-assessment of learning
        self.learning_self_assessment_data = []

        # Demographic info
        self.demographic_data = dict()
        self.prior_experience_data = dict()

        # Read JSON files
        self.readJSONFiles()

        # Graded score and answers
        self.grader = Grader()
        self.feature_classification_score = None
        self.feature_classification_graded_answers = []
        self.feature_comparison_score = None
        self.feature_comparison_graded_answers = []
        self.design_classification_score = None
        self.design_classification_graded_answers = []
        self.design_comparison_score = None
        self.design_comparison_graded_answers = []

    def gradeAnswers(self, confidenceThreshold=None):
        if confidenceThreshold is not None:
            self.grader.setConfidenceThreshold(confidenceThreshold)

        self.feature_classification_score, self.feature_classification_graded_answers = self.grader.gradeAnswers(
            "feature", "classification", self.feature_classification_answer,
            self.feature_classification_confidence)
        self.feature_comparison_score, self.feature_comparison_graded_answers = self.grader.gradeAnswers(
            "feature", "comparison", self.feature_comparison_answer,
            self.feature_comparison_confidence)
        self.design_classification_score, self.design_classification_graded_answers = self.grader.gradeAnswers(
            "design", "classification", self.design_classification_answer,
            self.design_classification_confidence)
        self.design_comparison_score, self.design_comparison_graded_answers = self.grader.gradeAnswers(
            "design", "comparison", self.design_comparison_answer,
            self.design_comparison_confidence)

    def printScoreSummary(self):
        print("Subject: {0} - condition: {1}".format(self.participant_id,
                                                     self.condition))
        print("Fcl: {0}, Fpwc: {1}, Dcl: {2}, Dpwc: {3}".format(
            self.feature_classification_score, self.feature_comparison_score,
            self.design_classification_score, self.design_comparison_score))

    def readJSONFiles(self):
        dirname = os.path.join(self.jsonFilesRootPath, self.participant_id)

        if not os.path.isdir(dirname):
            print("Failed to load the JSON file - directory not found: {0}".
                  format(dirname))
            return
            # raise OSError("Directory not found: {0}".format(dirname))

        jsonFiles = [
            os.path.join(dirname, f) for f in os.listdir(dirname)
            if os.path.isfile(os.path.join(dirname, f)) and f.endswith(".json")
        ]

        for filename in jsonFiles:
            with open(filename, newline='') as file:
                try:
                    data = json.loads(file.read())

                    if 'treatmentCondition' in data:
                        if self.condition is None:
                            # Condition 1: Manual - without generalization
                            # Condition 2: Automated - without generalization
                            # Condition 3: Interactive - without generalization
                            # Condition 4: Manual - with generalization
                            # Condition 5: Automated - with generalization
                            # Condition 6: Interactive - with generalization
                            self.condition = data['treatmentCondition']

                    if "learning" in os.path.basename(filename):
                        self.learning_task_data = data

                    elif "feature_synthesis" in os.path.basename(filename):
                        self.feature_synthesis_task_data = data

                    elif "design_synthesis" in os.path.basename(filename):
                        self.design_synthesis_task_data = data

                    elif "conceptMap-prior" in os.path.basename(filename):
                        self.cmap_prior_data = data

                    elif "conceptMap-learning" in os.path.basename(filename):
                        self.cmap_learning_data = data

                except Exception:
                    print("Exception while reading: {0}".format(filename))
                    traceback.print_exc()

    def countFeatureParity(self, positive=True):
        return self.grader.countFeatureParity(
            self.feature_classification_graded_answers,
            self.feature_comparison_graded_answers,
            positive=positive)
Example #32
from grader import Grader
from lessons import IntroToPython, Statistics

grader = Grader()
itp_id = grader.register(IntroToPython)
stat_id = grader.register(Statistics)

grader.start_assignment("Tammy", itp_id)
print("Tammy's Lesson:", grader.get_lesson("Tammy"))
print(
    "Tammy's check:",
    grader.check_assignment("Tammy", "a = 1 ; b = 'hello'"),
)
print(
    "Tammmy's other check:",
    grader.check_assignment("Tammy", "a = 1\nb = 'hello'"),
)
print(grader.assignment_summary("Tammy"))

grader.start_assignment("Tammy", stat_id)
print("Tammy's Lessons:", grader.get_lesson("Tammy"))
print("Tammy's check:", grader.check_assignment("Tammy", "avg=5.25"))
print(
    "Tammy's other check:",
    grader.check_assignment("Tammy", "avg = statistics.mean([1, 5, 18, -3])"),
)
print(grader.assignment_summary("Tammy"))
Example #33
from sklearn.model_selection import KFold

for tr_ind, val_ind in KFold(5, shuffle=False).split(test_df):  # loop header assumed; the snippet starts mid-loop
    X_tr, X_val = test_df.iloc[tr_ind], test_df.iloc[val_ind]
    means = X_val['item_id'].map(X_tr.groupby('item_id').target.mean())
    X_val['item_id_target_mean'] = means
    test_df.iloc[val_ind] = X_val
    
prior = test_df['target'].mean()
test_df.fillna(prior, inplace = True)
corr = np.corrcoef(all_data['target'].values, test_df['item_id_target_mean'])[0][1]
# You will need to compute the correlation like this:
#corr = np.corrcoef(all_data['target'].values, encoded_feature)[0][1]
#corr = 0.41645904885340546
#%%

print(corr)
#%%
grader.submit_tag('KFold_scheme', corr)

# %%

# 2. Leave-one-out scheme
test_df = all_data
# transform broadcasts the per-item sum back onto every row
test_df['item_target_sum'] = test_df.groupby('item_id')['target'].transform('sum')

# %%
test_df.head()

#%%
for i in range(0,len(test_df)):
    print(i)
#%%
test_df['LOOCV'] = 0  # placeholder; a completion sketch follows
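
# A minimal sketch of the leave-one-out encoding the placeholder above builds
# toward (each row's own target is excluded from its item's mean):

n_objects = test_df.groupby('item_id')['target'].transform('count')
test_df['LOOCV'] = (test_df['item_target_sum'] - test_df['target']) / (n_objects - 1)
test_df['LOOCV'].fillna(test_df['target'].mean(), inplace=True)  # items that occur once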
Example #34
	def check(self, code):  # method header assumed; the snippet starts mid-method
		results = self.assignment.check(code)
		if results:
			self.correct_attempts += 1

		return results 

	def lesson(self):
		return self.assignment.lesson()

import uuid
class Grader:
	def __init__(self):
		self.student_graders = {}
		self.assignment_classes = {}

	def register(self, assignment_class):
		if not issubclass(assignment_class, Assignment):
			raise RuntimeError(
				"Your class does not have the right method"
				)
		id = uuid.uuid4() 
		self.assignment_classes[id] = assignment_class
		return id 


# main file 
from grader import Grader 
from lessons import IntroToPython, Statistics

grader = Grader()
itp_id = grader.register(IntroToPython)