Example #1
    def __init__(self):
        Resource.__init__(self)
        self.is_daemon = config.run_as_daemon
        self.index_database = DatabaseAPI(config.db_host, config.db_port,
                                 config.db_name, config.db_user, config.db_pass)
        self.indexer = Indexer(config.stopword_file_path, config.tags_to_ignore)

        if self.is_daemon:
            self.run_as_daemon(config.server_port)
Example #2
    def setUp(self):

        self.server = DatabaseServer()
        self.db = DatabaseAPI()

        self.valid_request_body_dict = {
            "method": "get_login_user_info",
            "query_data": {
                "user_id": "1"
            }
        }

        self.invalid_request_method_body_dict = copy.copy(
            self.valid_request_body_dict
        )  # Same dict except change the valid method to an invalid one
        self.invalid_request_method_body_dict["method"] = "corge_grault"

        self.invalid_request_no_method_body_dict = copy.copy(
            self.valid_request_body_dict)
        del self.invalid_request_no_method_body_dict["method"]

        #self.valid_request_body_json = json.dumps(self.valid_request_body_dict)

        self.valid_request_dict = {
            "packet_size": 200,
            "body_json": self.valid_request_body_dict
        }

        self.valid_request_json = json.dumps(self.valid_request_dict)
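
A test method built on these fixtures might look like the sketch below (hypothetical: it assumes the server under test exposes the _dispatch_request() method shown in Example #3 and answers with a JSON string containing a status_code field):

    def test_valid_request_returns_ok_status(self):
        # Sketch only: dispatch the serialized valid request and check that
        # the response reports success (status_code == 0).
        response_json = self.server._dispatch_request(self.valid_request_json)
        response_dict = json.loads(response_json)
        self.assertEqual(response_dict["status_code"], 0)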
Example #3
 def _dispatch_request(self, request_json: str) -> str:
     request_dict = json.loads(request_json)
     request_dict = request_dict["body_json"] # Continue with only the body JSON, packet size not relevant going forward 
     response_dict = self._validate_request(request_dict)
     if response_dict["status_code"] == 0:
         method, query_data = request_dict["method"], request_dict["query_data"]
         db = DatabaseAPI() # Let it use default JSON map
         try:
             eval_string = f"db.{method}(query_data=query_data)"
             database_response = eval(eval_string)
         except Exception as e:
             print(f"exception raised by database call")
             response_dict["status_code"] = 1
             print(repr(e))
             database_response = f"Database error: {repr(e)}"  # TODO Pass the Exception from DB API back through the pipe same as any other error message
         # Both dict payloads and error strings are returned in body_json for now
         response_dict["body_json"] = database_response
     # TODO figure out the right way to get the packet size correctly and efficiently. NB especially that Python string or int object
     #   with its various Python methods has more bytes than the underlying raw data in memory.
     response_packet_size = sys.getsizeof(json.dumps(response_dict)) # TODO duplicative call to json.dumps, surely a better way
     response_packet_size += sys.getsizeof(response_packet_size)
     response_dict["packet_size"] = response_packet_size
     response_json = json.dumps(response_dict)
     return response_json
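
The eval() call and the sys.getsizeof() TODOs above could be handled roughly as in this sketch (an illustration only, assuming the value in method is the name of a DatabaseAPI method and that "packet size" means the byte length of the encoded response):

     # Dispatch by attribute lookup instead of eval():
     handler = getattr(db, method, None)
     if handler is None:
         raise ValueError(f"Unsupported method: {method}")
     database_response = handler(query_data=query_data)

     # Measure the response as the number of bytes that will actually be sent,
     # not the in-memory size of the Python objects:
     response_json = json.dumps(response_dict)
     response_dict["packet_size"] = len(response_json.encode("utf-8"))
     response_json = json.dumps(response_dict)  # re-serialize with the size included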
Example #4
    def __init__(self, api_url, window_name='ML2-FaceEmotionSwap'):
        self.emoji_images = self.init_emojis()
        self.network = self.init_face_emotion_classifier()
        self.face_cascade, self.cascade_files = self.init_face_cascade()

        self.cam = CameraFrame(window_name=window_name)

        # Connect to the PostgreSQL database API
        self.API = api_url + "face/new/"
        print(self.API)
        self.api = DatabaseAPI(self.API)
        self.unknown = self.api.get_number_of_rows()

        # recognize face
        self.known_face_encodings = []
        self.known_face_names = []
        self.face_database_encoding()

        # Save to the database at most once per second
        self.time_stamp = ""
Example #5
    def setUp(self):

        self.db = DatabaseAPI()  # Testing on the real DB, to have restaurants
        # TODO: script that populates the test DBs with realistic restaurants en masse. And/or separate JSON map for this test
        #   (pointing to same test DB filenames for some like users, different one for datespots)
        self.user_data = model_interfaces.UserModelInterface()

        # Need user objects to instantiate a Match
        grortName = "Grort"
        self.grortCurrentLocation = (40.746667, -74.001111)
        grort_data = {
            "name": grortName,
            "current_location": self.grortCurrentLocation
        }
        self.grort_user_id = self.db.post_object({
            "object_model_name": "user",
            "object_data": grort_data
        })
        self.userGrort = self.user_data.lookup_obj(self.grort_user_id)

        drobbName = "Drobb"
        self.drobbCurrentLocation = (40.767376158866554, -73.98615327558278)
        drobb_data = {
            "name": drobbName,
            "current_location": self.drobbCurrentLocation
        }
        self.drobb_user_id = self.db.post_object({
            "object_model_name": "user",
            "object_data": drobb_data
        })
        self.userDrobb = self.user_data.lookup_obj(self.drobb_user_id)

        # distance should be approx 2610m
        # midpoint should be circa (40.75827478958617, -73.99310556132602)

        self.matchGrortDrobb = models.Match(self.userGrort, self.userDrobb)
        assert self.matchGrortDrobb.midpoint is not None

        # Get the candidates list that the DatabaseAPI would be giving to Match:
        self.candidate_datespots_list = self.db.get_datespots_near(
            {"location": self.matchGrortDrobb.midpoint})
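
Given the values noted in the comments above, a follow-up assertion might look like this sketch (assumptions: Match exposes the distance and midpoint properties used in Example #15, and distance is in meters as the comment suggests):

    def test_distance_and_midpoint_are_plausible(self):
        # Sketch only: loose tolerances around the values noted in setUp.
        self.assertAlmostEqual(self.matchGrortDrobb.distance, 2610, delta=100)
        lat, lon = self.matchGrortDrobb.midpoint
        self.assertAlmostEqual(lat, 40.75827478958617, delta=0.01)
        self.assertAlmostEqual(lon, -73.99310556132602, delta=0.01)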
Example #6
    def submit(self, dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any],) -> List[Dict]:

        global book_id
        book_id = random.randint(10000, 99999)

        DatabaseAPI(book_id, tracker.get_slot('mobile'),
                    tracker.get_slot('source'), tracker.get_slot('arrival'))
        dispatcher.utter_message(template="utter_booked", id=book_id,
                                 number=tracker.get_slot('mobile'),
                                 pick_up=tracker.get_slot('source'),
                                 destination=tracker.get_slot('arrival'))

        return []
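
The global book_id could be avoided by returning the generated id as a slot event instead, roughly as in this sketch (assumptions: the standard rasa_sdk.events.SlotSet event is available and a booking_id slot exists in the domain):

        # from rasa_sdk.events import SlotSet  # module-level import (assumed)
        book_id = random.randint(10000, 99999)
        DatabaseAPI(book_id, tracker.get_slot('mobile'),
                    tracker.get_slot('source'), tracker.get_slot('arrival'))
        dispatcher.utter_message(template="utter_booked", id=book_id,
                                 number=tracker.get_slot('mobile'),
                                 pick_up=tracker.get_slot('source'),
                                 destination=tracker.get_slot('arrival'))
        return [SlotSet("booking_id", book_id)]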
Example #7
    def setUp(self):

        data_map = { # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json"
            }

        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # make sure all the test-mock JSONs exist
        for filename in data_map:
            with open(data_map[filename], 'w') as fobj:
                json.dump({}, fobj)
                fobj.seek(0)

        self.api = ReviewModelInterface(json_map_filename = TEST_JSON_DB_NAME)
        self.datespot_api = DatespotModelInterface(json_map_filename = TEST_JSON_DB_NAME)

        # Make mock restaurant
        self.terrezanos_location = (40.72289821341384, -73.97993915779077)
        self.terrezanos_name = "Terrezano's"
        self.terrezanos_traits = ["italian", "wine", "pasta", "NOT FROM PIZZA HUT", "authentic", "warehouse"]
        self.terrezanos_price_range = 2
        self.terrezanos_hours = [[14, 22], [14, 21], [14, 21], [14, 21], [14, 23], [14, 23], [14, 20]] # ints in [0..23] representing hours, for now

        terrezanos_data = {
            "location" : self.terrezanos_location,
            "name" : self.terrezanos_name,
            "traits" : self.terrezanos_traits,
            "price_range" : self.terrezanos_price_range,
            "hours" : self.terrezanos_hours,
        }
        
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        self.terrezanos_id = self.datespot_api.create(terrezanos_data)

        # Make mock text
        self.mock_text_positive_relevant = "This was a wonderful place to go on a date. I had the pasta. It was authentic and not from Pizza Hut."
        self.expected_sentiment = 0.1906 # todo hardcoded
        self.expected_relevance = round(1 / len(self.mock_text_positive_relevant), 4) # i.e. "date" appears once.
Example #8
class IndexService(Resource):
    """ 
    Index microservice class.
    """

    isLeaf = True

    def __init__(self):
        Resource.__init__(self)
        self.is_daemon = config.run_as_daemon
        self.index_database = DatabaseAPI(config.db_host, config.db_port,
                                 config.db_name, config.db_user, config.db_pass)
        self.indexer = Indexer(config.stopword_file_path, config.tags_to_ignore)

        if self.is_daemon:
            self.run_as_daemon(config.server_port)

    def run_as_daemon(self, port, unit_test=False):
        self.index_database.make_tables("wordfreq", {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
        if not unit_test:
            host = self.get_service_ip(config.content_module_name)
            self.index_all_articles(host)
        print("\nStarting the indexer as a daemon listening to port %d..." % port)
        reactor.listenTCP(port, server.Site(self))
        reactor.run()

    # Asks the user a few questions at startup.
    def startup_routine(self):
        indexContent = False
        yes = set(['', 'Y', 'y', 'Yes', 'yes', 'YES'])
        no = set(['N', 'n', 'No', 'no', 'NO'])
        index_on_startup = False
        print("Type 'help' for help.")
        while True:
            print(">> ", end="")
            user_input = str(raw_input())
            if user_input == 'help': # Print available commands to user.
                print()
                print("         <command>   -       <description>")
                print("         help        -       Help.")
                print("         reset       -       Reset index database.")
                print("         init        -       Index all articles from content service on startup.")
                print("         start       -       Start service.")
                print("         exit        -       Quit.")
                print()
            elif user_input == 'reset': # Clearing tables in the index database.
                print("This will delete any existing data and reset the database.")
                print("Are you sure you want to continue? [Y/n] ", end="")
                while True:
                    user_input = str(raw_input())
                    if user_input in yes:
                        self.index_database.make_tables("wordfreq", {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
                        print("Reset.")
                        break
                    else:
                        print("Abort.")
                        break
            elif user_input == 'init': # Toggle on/off indexing on startup.
                while True:
                    print("Do you want to index all the articles on startup? [Y/n] ", end="") 
                    user_input = str(raw_input())
                    if user_input in yes:
                        index_on_startup = True
                        print("Indexing will begin on start.")
                        break
                    elif user_input in no:
                        print("Indexing will not begin on start.")
                        index_on_startup = False
                        break
                    else:
                        print("Abort.")
                        break
            elif user_input == 'start': # Start indexing service.
                print("Starting index service. Use Ctrl + c to quit.")
                if index_on_startup:
                    host = self.get_service_ip(config.content_module_name)
                    self.index_all_articles(host)
                reactor.listenTCP(config.server_port, server.Site(self))
                reactor.run()
                break
            elif user_input == 'exit': # End program.
                break
            elif user_input == '': # Yes is default on return.
                continue
            else:
                print(user_input + ": command not found")
                continue

    def index_all_articles(self, host, unit_test=False):
        publish_article_list = host + "/list"
        r = requests.get(publish_article_list)
        article_id_list = r.json()['list']
        total = len(article_id_list)
        for i in range(total):
            sys.stdout.write('\r')
            sys.stdout.write("Indexing article {i} of {total}.".format(i=i+1, total=total))
            sys.stdout.flush()
            article_id = article_id_list[i]['id']
            if unit_test:
                self.index_article(article_id_list[i]['title'], article_id)
            else:
                self.index_article(article_id)
        print("\nIndexing completed.")

    # Fetches the publish host address from the communication backend.
    def get_service_ip(self, service_name):
        try:
            r = requests.get(config.comm_host+service_name)
            url = r.json()
            if url:
                url = "http://" + url
        except:
            print('\nUsing hardcoded value for publish host ip.')
            url = 'http://despina.128.no/publish' # Hardcoded url for testing purposes.
        return url

    # Indexes page.
    def index_article(self, article_id, url=None):
        if url:
            values = self.indexer.make_index(url)
            self.index_database.upsert('wordfreq', article_id, values)
        else:
            host = self.get_service_ip(config.content_module_name)
            url = host + "/article/" + article_id # Articles should be found at: http://<publish_service_host>/article/<article_id> 
            values = self.indexer.make_index(url)
            self.index_database.upsert('wordfreq', article_id, values)

    # Handles POST requests from the other microservices.
    def render_POST(self, request):
        d = json.load(request.content)
        # Returns a list of suggestions of words with given word root:
        if d['task'] == 'getSuggestions': # JSON format: {'task' : 'getSuggestions', 'word' : str}
            word_root = d['word']
            data = self.index_database.query("SELECT DISTINCT word FROM wordfreq WHERE word LIKE %s", (word_root+'%',))
            response = {"suggestions" : [t[0] for t in data]}
            return json.dumps(response)
        # Returns all articles where given word occurs:
        elif d['task'] == 'getArticles': # JSON format: {'task' : 'getArticles', 'word' : str}
            word = d['word']
            data = self.index_database.query("SELECT articleid FROM wordfreq WHERE word = %s", (word,))
            response = {"articleID" : [t[0] for t in data]}
            return json.dumps(response)
        # Returns a list of all words and the total number of occurrences of each word:
        elif d['task'] == 'getFrequencyList': # JSON format: {'task' : 'getFrequencyList'}
            data = self.index_database.query("SELECT word, sum(frequency) FROM wordfreq GROUP BY word")
            response = {}
            for value in data:
                response[value[0]] = value[1]
            return json.dumps(response)
        # Indexes published article with given id:
        elif d['task'] == 'publishedArticle':
            article_id = d['articleID']
            self.index_article(article_id)
            return '200 - thanks!'
        # Removes index of article with given id:
        elif d['task'] == 'removedArticle':
            article_id = d['articleID']
            self.index_database.remove(article_id)
            return('200 - ok!')
        else:
            return('404')
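
A client would call render_POST() with the JSON task formats documented in the comments above, roughly as in this sketch (the URL is a placeholder; the service actually listens on config.server_port):

import json
import requests

INDEX_SERVICE_URL = "http://localhost:8080"  # placeholder host/port

# Word suggestions for a given root:
payload = {"task": "getSuggestions", "word": "py"}
r = requests.post(INDEX_SERVICE_URL, data=json.dumps(payload))
print(r.json()["suggestions"])

# All article ids containing an exact word:
payload = {"task": "getArticles", "word": "python"}
r = requests.post(INDEX_SERVICE_URL, data=json.dumps(payload))
print(r.json()["articleID"])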
Example #9
    def setUp(self):

        # Todo: Import subprocess and run the preprocess-tastes script to sort the text file.
        #   That doesn't need to be in setUp though, just once per test run. Maybe the shell script should do it.

        data_map = { # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
            }

        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # make sure all the test-mock JSONs exist:
        for filename in data_map:
            with open(data_map[filename], 'w') as fobj:
                json.dump({}, fobj)
                fobj.seek(0)
        
        # Instantiate model interface and DB connection:
        self.api = MessageModelInterface(json_map_filename = TEST_JSON_DB_NAME)
        self.user_api = UserModelInterface(json_map_filename = TEST_JSON_DB_NAME)
        self.chat_api = ChatModelInterface(json_map_filename = TEST_JSON_DB_NAME)
        self.db = DatabaseAPI(json_map_filename = TEST_JSON_DB_NAME)


        # Make three mock users

        self.akatosh_name = "Akatosh"
        self.akatosh_location = (40.73517750328247, -74.00683227856715)
        self.akatosh_id = "1"
        akatosh_data = {
            "name": self.akatosh_name,
            "current_location": self.akatosh_location,
            "force_key": self.akatosh_id
        }
        self.user_api.create(akatosh_data)

        self.stendarr_name = "Stendarr"
        self.stendarr_location = (40.74769591216627, -73.99447266003756)
        self.stendarr_id = "2"
        stendarr_data = {
            "name": self.stendarr_name,
            "current_location": self.stendarr_location,
            "force_key": self.stendarr_id
        }
        self.user_api.create(stendarr_data)

        self.talos_name = "Talos"
        self.talos_location = (40.76346250260515, -73.98013893542904)
        self.talos_id = "3"
        talos_data = {
            "name": self.talos_name,
            "current_location": self.talos_location,
            "force_key": self.talos_id
        }
        self.user_api.create(talos_data)

        # Mock message data:

        self.mock_bilateral_timestamp = time.time()
        self.quick_mock_chat_data = {
            "start_time": time.time(),
            "participant_ids": [self.akatosh_id, self.stendarr_id]
        }
        self.mock_chat_id_1 = self.chat_api.create(self.quick_mock_chat_data)
        self.single_sentence_text = "Worship the Nine, do your duty, and heed the commands of the saints and priests."
        self.expected_sentiment_single_sentence = 0.296 # todo hardcoded

        # Mock message where user expresses tastes info

        self.tastes_message_timestamp = time.time()
        self.akatosh_taste_name = "italian"
        self.tastes_message_text = f"I love {self.akatosh_taste_name} food"
        self.expected_sentiment_tastes_sentence = 0.6369 # todo hardcoded
            # Todo import the identical sentiment analyzer here, and run it on the sentence, and save the result.
        self.tastes_message_data = {
            "time_sent": self.tastes_message_timestamp,
            "sender_id": self.akatosh_id,
            "chat_id": self.mock_chat_id_1,
            "text": self.tastes_message_text
        }
        self.tastes_message_id = self.api.create(self.tastes_message_data)

        # Mock message with negative taste sentiment
        self.negative_tastes_message_timestamp = time.time()
        self.akatosh_negative_taste_name = "thai"
        self.negative_tastes_message_text = f"I don't really like {self.akatosh_negative_taste_name} food"
        self.expected_sentiment_negative_tastes_sentence = -0.3241 # todo hardcoded
        self.negative_tastes_message_data = {
            "time_sent": self.negative_tastes_message_timestamp,
            "sender_id": self.akatosh_id,
            "chat_id": self.mock_chat_id_1,
            "text": self.negative_tastes_message_text
        }
        self.negative_tastes_message_id = self.api.create(self.negative_tastes_message_data)
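
A test built on these fixtures might check a stored message's sentiment against the hardcoded expectation, along these lines (a sketch; it assumes messages looked up through the interface expose a sentiment attribute, as in Example #16):

    def test_tastes_message_sentiment(self):
        # Sketch only: look the message back up and compare its sentiment to the
        # hardcoded expected value from setUp.
        message_obj = self.api.lookup_obj(self.tastes_message_id)
        self.assertAlmostEqual(message_obj.sentiment,
                               self.expected_sentiment_tastes_sentence,
                               places=4)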
Example #10
class ML2FaceEmojiSwap:

    CASCADE_FILE_DIR = 'haarcascade_files'

    def __init__(self, api_url, window_name='ML2-FaceEmotionSwap'):
        self.emoji_images = self.init_emojis()
        self.network = self.init_face_emotion_classifier()
        self.face_cascade, self.cascade_files = self.init_face_cascade()

        self.cam = CameraFrame(window_name=window_name)

        # Connect to the PostgreSQL database API
        self.API = api_url + "face/new/"
        print(self.API)
        self.api = DatabaseAPI(self.API)
        self.unknown = self.api.get_number_of_rows()

        # recognize face
        self.known_face_encodings = []
        self.known_face_names = []
        self.face_database_encoding()

        # Save to the database at most once per second
        self.time_stamp = ""

    @staticmethod
    def init_emojis():
        emoji_dir = './emojis'
        emoji_images = os.listdir(emoji_dir)
        emoji_images.sort()
        emoji_images = [os.path.join(emoji_dir, f) for f in emoji_images]
        emoji_images = [cv.imread(f, cv.IMREAD_COLOR) for f in emoji_images]
        return emoji_images

    @staticmethod
    def init_face_emotion_classifier():
        network = EmotionClassifier()
        network.load_model()
        return network

    @staticmethod
    def init_face_cascade():

        cascade_files = ['haarcascade_frontalface_alt.xml',
                         'haarcascade_frontalface_alt2.xml',
                         'haarcascade_frontalface_alt_tree.xml',
                         'haarcascade_frontalface_default.xml']
        face_cascade_file = os.path.join(
            ML2FaceEmojiSwap.CASCADE_FILE_DIR, cascade_files[0])
        return cv.CascadeClassifier(face_cascade_file), cascade_files

    def face_database_encoding(self):
        self.known_face_encodings = []
        self.known_face_names = []
        face_database = self.api.get_face_encoding()
        for fd in face_database:
            face, encoding = fd[0], fd[1]
            # np.float was removed in recent NumPy versions; use an explicit dtype
            face_encoding = np.array(encoding, dtype=np.float64)
            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(face)

    def detect_faces_name(self, frame, faces):
        rgb_small_frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)

        face_locations = []
        for x,y,w,h in faces:
            left = x
            top = y
            right = x + w
            bottom = y + h
            face_locations.append([top, right, bottom, left])
        
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(
                self.known_face_encodings, face_encoding)
            name = "Unknown-" + str(self.unknown)

            if True in matches:
                first_match_index = matches.index(True)
                name = self.known_face_names[first_match_index]
            else:
                self.known_face_encodings.append(face_encoding)
                self.known_face_names.append(name)
                self.unknown += 1

            face_names.append(name)

        return face_encodings, face_names

    def detect_faces(self, frame):
        resize_frame = cv.resize(frame, (0,0), fx=0.25, fy=0.25)

        gray = cv.cvtColor(resize_frame, cv.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)

        # No faces found
        if isinstance(faces, tuple):
            return
        
        # get the face from the image frame and resize it for prediction
        image_faces = []
        for idx, (x, y, w, h) in enumerate(faces):
            face = gray[y:y+h, x:x+w]
            face = cv.resize(face, (SIZE_FACE, SIZE_FACE),
                             interpolation=cv.INTER_CUBIC) / 255.
            image_faces.append(face)

        faces_for_prediction = np.array(image_faces)
        prediction = self.network.predict(faces_for_prediction)
        prediction = np.round(prediction, 3)
        prediction_class = np.argmax(prediction, 1)

        # adapted for screen
        detection_result = np.ones((200, 1800), np.uint8)

        # face recognition to show name
        face_encodings, face_names = self.detect_faces_name(resize_frame, faces)

        zipdata = enumerate(zip(faces, face_names, face_encodings))
        capture_images = [None] * len(face_names)
        # swap each face with its predicted class emoji.
        # create an additional detection result,
        # which shows the cut out face and the model prediction as bar chart.
        for idx, ((x, y, w, h), name, face_encoding) in zipdata:
            x *= 4
            y *= 4
            w *= 4
            h *= 4
            emoji = self.emoji_images[prediction_class[idx]]
            emoji = cv.resize(emoji, (w, h))

            roi = frame[y:y+h, x:x+w]

            img2gray = cv.cvtColor(emoji, cv.COLOR_BGR2GRAY)

            ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY)

            mask_inv = cv.bitwise_not(mask)

            img1_bg = cv.bitwise_and(roi, roi, mask=mask_inv)

            img2_fg = cv.bitwise_and(emoji, emoji, mask=mask)

            dst = cv.add(img1_bg, img2_fg)
            frame[y:y+h, x:x+w] = dst

            cv.rectangle(frame, (x,y), (x+h,y+w), (0,0,225), 2)
            font = cv.FONT_HERSHEY_DUPLEX
            cv.rectangle(frame,(x,y+w),
              (x+h,y+w+50),(0,0,255), cv.FILLED)
            cv.putText(frame, name, (x+5,y+w+35),
                        font, 1.0, (255, 255, 255), 1)
            
            # create for nine faces a detection result
            if idx < 9:
                image_faces[idx] = cv.resize(
                    image_faces[idx], (200, 200)) * 255
                capture_images[idx] = image_faces[idx].copy()
                for index, emotion in enumerate(EMOTIONS):
                    cv.putText(image_faces[idx],
                               emotion,
                               (10, index * 20 + 20),
                               cv.FONT_HERSHEY_PLAIN,
                               0.8,
                               (0, 255, 0),
                               1)
                    cv.rectangle(image_faces[idx],
                                 (100, index * 20 + 10),
                                 (100 + int(prediction[idx][index] * 100),
                                  (index + 1) * 20 + 4),
                                 (255, 0, 0),
                                 -1)

                x1 = idx * 200
                y1 = 0

                detection_result[y1:y1+200, x1:x1+200] = image_faces[idx]

        timestamp = self.get_timestamp()
        if self.time_stamp != timestamp:
            result_to_database = zip(face_names, prediction.tolist(), capture_images, face_encodings)
            self.save_to_database(timestamp, result_to_database)
            self.time_stamp = timestamp
        return detection_result

    def save_to_database(self, timestamp, result):
        self.face_database_encoding()
        for (face, emotion, image, face_encoding) in result:
            data = {
                'action': 'update',
                'emotion_detail': [{"timestamp": timestamp, "emotion": emotion}],
                'face': face
            }
            matches = face_recognition.compare_faces(
                self.known_face_encodings, face_encoding)
            if True in matches:
                first_match_index = matches.index(True)
                data['face'] = self.known_face_names[first_match_index]
                self.api.put(data)
            else:
                payload = {'action': 'update', 'face': data['face'], 'face_image': image.tolist(), 'face_encoding': face_encoding.tolist()}
                self.api.post(payload)

    def get_timestamp(self):
        # cast back => datetime.datetime.strptime('2019-06-26 10:51:41', '%Y-%m-%d %H:%M:%S')
        return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    def load_cascade_file(self, file_idx):
        if not self.cascade_files:
            return
        file_idx -= 49  # ord('1') == 49: keys '1'-'4' map to cascade file indices 0-3
        cascade_file_name = self.cascade_files[int(file_idx)]
        face_cascade_file = os.path.join(
            self.CASCADE_FILE_DIR, cascade_file_name)
        print(face_cascade_file)
        self.face_cascade.load(face_cascade_file)

    def run_on_camera(self, cam_dev=0):
        HELP_TEXT = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
        fps = ''
        h_text = HELP_TEXT
        show_help = True
        full_screen = False

        # `cam_dev` selects the camera device; the default 0 is typically the built-in PC camera
        self.cam.open_cam_usb(cam_dev)
        # Use this instead to run the on-board camera on a Jetson board:
        # self.cam.open_cam_onboard()
        do_exit = False
        while not do_exit:

            start = time.time()

            retval, frame_origin = self.cam.get_frame()
            frame = frame_origin.copy()
            detection_result = self.detect_faces(frame)
            end = time.time()

            if end-start != 0:
                fps = str(round(1 / (end - start), 2))

            key = cv.waitKey(10)
            if show_help:
                h_text = HELP_TEXT + '; FPS: '+fps
            if key == 27:  # ESC key: quit program
                do_exit = True
            elif key == ord('H') or key == ord('h'):  # toggle help message
                show_help = not show_help
            elif key == ord('F') or key == ord('f'):
                full_screen = not full_screen
                self.cam.set_fullscreen(full_screen)

            elif ord('1') <= key <= ord('4'):
                self.load_cascade_file(key)

            self.cam.show_in_window(
                frame_origin, frame, detection_result, help_text=h_text)

        self.cam.close()

    def run_on_image(self, image_name):

        image_origin = cv.imread(image_name)

        image = image_origin.copy()
        detection_result = self.detect_faces(frame=image)

        self.cam.show_in_window(image_origin, image, detection_result)

        cv.waitKey()
        cv.destroyAllWindows()

    def run_on_image_directory(self, image_directory):
        full_screen = False
        do_exit = False
        help_text = '"Esc" to Quit; "F" to Toggle Fullscreen'
        allowed_image_extension = ['.jpg', '.png']
        images = os.listdir(image_directory)
        images = [file for file in images if file.endswith(
            tuple(allowed_image_extension))]

        image_index = 0
        image_count = len(images)

        while not do_exit:
            image_index = image_index % image_count

            image_origin = cv.imread(
                join(image_directory, images[image_index]))

            image = image_origin.copy()
            detection_result = self.detect_faces(frame=image)

            self.cam.show_in_window(
                image_origin, image, detection_result, help_text)

            key = cv.waitKey(5000)
            if key == 27:  # ESC key: quit program
                do_exit = True
            elif key == ord('F') or key == ord('f'):
                full_screen = not full_screen
                self.cam.set_fullscreen(full_screen)

            image_index += 1
            image_index = image_index % image_count

        cv.destroyAllWindows()
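
A minimal entry point for this class might look like the sketch below (the API URL is a placeholder and would normally come from configuration):

if __name__ == '__main__':
    app = ML2FaceEmojiSwap(api_url='http://localhost:8000/')  # placeholder URL
    app.run_on_camera()  # or: app.run_on_image('some_image.jpg')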
Example #11
 def set_up(self):
     self.index = DatabaseAPI(config.db_host, config.db_port,
                              config.db_name, config.db_user, config.db_pass)
     self.passed_tests = 0
     self.failed_tests = 0
Example #12
class DatabaseAPI_test:
    ''' 
    Test class for database_api.py. The functions _make_connection() and _close_connection() 
    are implicitly tested when the other functions are tested. 
    '''
    def set_up(self):
        self.index = DatabaseAPI(config.db_host, config.db_port,
                                 config.db_name, config.db_user,
                                 config.db_pass)
        self.passed_tests = 0
        self.failed_tests = 0

    # Makes a table, inserts a few items, and queries the database for those items.
    # Explicitly tests make_table(), upsert() and query() together, in database_API.py.
    # Implicitly tests _make_connection() and _close_connection() in database_API.py.
    def test_routine1(self):
        print("Test 1: ", end='')
        self.index.make_tables('wordfreq', {
            "articleid": "VARCHAR",
            "word": "VARCHAR",
            "frequency": "INTEGER"
        }, "(articleid, word)")
        self.index.upsert(table_name='wordfreq',
                          article_id='test1',
                          values=[('test_word1', 1), ('test_word2', 2)])
        query_data = self.index.query(
            "SELECT articleid, word, frequency FROM wordfreq WHERE word = 'test_word2';"
        )
        if (query_data[0][0] == 'test1' and query_data[0][1] == 'test_word2'
                and query_data[0][2] == 2):
            self.passed_tests += 1
            print('pass')
        else:
            self.failed_tests += 1
            print('failed')

    # More or less the same as test_routine1(), but now also tests remove().
    # Explicitly tests make_table(), upsert(), query() and remove() in database_API.py.
    # Implicitly tests _make_connection() and _close_connection() in database_API.py.
    def test_routine2(self):
        print("Test 2: ", end='')
        self.index.make_tables('wordfreq', {
            "articleid": "VARCHAR",
            "word": "VARCHAR",
            "frequency": "INTEGER"
        }, "(articleid, word)")
        self.index.upsert(table_name='wordfreq',
                          article_id='test2',
                          values=[('test_word', 1)])
        self.index.remove('wordfreq', 'articleid', 'test2')
        query_data = self.index.query(
            "SELECT articleid, word, frequency FROM wordfreq WHERE articleid = 'test2';"
        )
        if query_data == []:
            self.passed_tests += 1
            print('pass')
        else:
            self.failed_tests += 1
            print('failed')

    # Tests if upsert() updates values correctly.
    def test_routine3(self):
        print("Test 3: ", end='')
        self.index.make_tables('wordfreq', {
            "articleid": "VARCHAR",
            "word": "VARCHAR",
            "frequency": "INTEGER"
        }, "(articleid, word)")
        self.index.upsert(table_name='wordfreq',
                          article_id='test3',
                          values=[('test_word', 1)])
        self.index.upsert(table_name='wordfreq',
                          article_id='test3',
                          values=[('test_word', 5)])
        query_data = self.index.query(
            "SELECT articleid, word, frequency FROM wordfreq WHERE articleid = 'test3';"
        )
        if query_data[0][2] == 5:
            self.passed_tests += 1
            print('pass')
        else:
            self.failed_tests += 1
            print('failed')

    def run_tests(self):
        print('Testing DatabaseAPI:')
        self.set_up()
        self.test_routine1()
        self.test_routine2()
        self.test_routine3()

    def print_results(self):
        print("DatabaseAPI test results:")
        print("Passed", self.passed_tests, "out of",
              self.passed_tests + self.failed_tests, "tests.")
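
These routines might be driven by a small runner along these lines (a sketch; the repository presumably has its own entry point):

if __name__ == '__main__':
    tests = DatabaseAPI_test()
    tests.run_tests()  # run_tests() calls set_up() itself
    tests.print_results()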
Example #13
 def set_up(self):
     self.index = DatabaseAPI(config.db_host, config.db_port,
                              config.db_name, config.db_user,
                              config.db_pass)
     self.passed_tests = 0
     self.failed_tests = 0
Example #14
class TestHelloWorldThings(unittest.TestCase):

    # Todo very important to test hard cases. Huge cases, edge cases, corner cases--try to break it.

    def setUp(self):

        # Blank out the test JSON files:
        data_map = {  # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
        }
        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # make sure all the test-mock JSONs exist
        for filename in data_map:
            with open(data_map[filename], 'w') as fobj:
                json.dump({}, fobj)
                fobj.seek(0)

        # Make a mock restaurant
        self.terrezanos_location = (40.72289821341384, -73.97993915779077)
        self.terrezanos_name = "Terrezano's"
        self.terrezanos_traits = [
            "italian", "wine", "pasta", "NOT FROM PIZZA HUT", "authentic",
            "warehouse"
        ]
        self.terrezanos_price_range = 2
        self.terrezanos_hours = [
            [14, 22], [14, 21], [14, 21], [14, 21], [14, 23], [14, 23], [14, 20]
        ]  # ints in [0..23] representing hours, for now

        terrezanos_data = {
            "location": self.terrezanos_location,
            "name": self.terrezanos_name,
            "traits": self.terrezanos_traits,
            "price_range": self.terrezanos_price_range,
            "hours": self.terrezanos_hours,
        }

        # Make mock text
        self.mock_text_positive_relevant = "This was a wonderful place to go on a date. I had the pasta. It was authentic and not from Pizza Hut."
        self.expected_sentiment = 0.1906  # todo hardcoded
        self.expected_relevance = round(
            1 / len(self.mock_text_positive_relevant),
            SENTIMENT_DECIMAL_PLACES)  # i.e. "date" appears once.

        # Connect to the database with the mock data set
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        args_data = {
            "object_model_name": "datespot",
            "object_data": terrezanos_data
        }
        self.terrezanos_id = self.db.post_object(args_data)

        # Instantiate mock Review object

        self.review_obj = models.Review(datespot_id=self.terrezanos_id,
                                        text=self.mock_text_positive_relevant)

    def test_init(self):
        self.assertIsInstance(self.review_obj, models.Review)

    def test_tokenize(self):
        """Does the internal tokenize method tokenize a multi-sentence text into an array of sentences as expected?"""
        expected_sentences = [
            "This was a wonderful place to go on a date.", "I had the pasta.",
            "It was authentic and not from Pizza Hut."
        ]
        self.review_obj._tokenize()
        for i in range(len(expected_sentences)):
            self.assertEqual(expected_sentences[i],
                             self.review_obj._sentences[i])

        # todo test the length of each

    def test_analyze_sentiment(self):
        self.review_obj._analyze_sentiment()
        self.assertAlmostEqual(self.expected_sentiment,
                               self.review_obj._sentiment)

    def test_public_sentiment_attribute(self):
        """
        Can the sentiment be accessed as expected via the object's public sentiment attribute?
        """
        self.assertAlmostEqual(self.expected_sentiment,
                               self.review_obj.sentiment)

    def test_analyze_relevance(self):
        self.review_obj._analyze_relevance()
        self.assertAlmostEqual(self.expected_relevance,
                               self.review_obj._analyze_relevance())

    def test_public_relevance_attribute(self):  # todo tbd if any external code ever needs this
        self.assertAlmostEqual(self.expected_relevance,
                               self.review_obj.relevance)
Example #15
class TestHelloWorldThings(unittest.TestCase):
    def setUp(self):

        self.db = DatabaseAPI()  # Testing on the real DB, to have restaurants
        # TODO: script that populates the test DBs with realistic restaurants en masse. And/or separate JSON map for this test
        #   (pointing to same test DB filenames for some like users, different one for datespots)
        self.user_data = model_interfaces.UserModelInterface()

        # Need user objects to instantiate a Match
        grortName = "Grort"
        self.grortCurrentLocation = (40.746667, -74.001111)
        grort_data = {
            "name": grortName,
            "current_location": self.grortCurrentLocation
        }
        self.grort_user_id = self.db.post_object({
            "object_model_name": "user",
            "object_data": grort_data
        })
        self.userGrort = self.user_data.lookup_obj(self.grort_user_id)

        drobbName = "Drobb"
        self.drobbCurrentLocation = (40.767376158866554, -73.98615327558278)
        drobb_data = {
            "name": drobbName,
            "current_location": self.drobbCurrentLocation
        }
        self.drobb_user_id = self.db.post_object({
            "object_model_name": "user",
            "object_data": drobb_data
        })
        self.userDrobb = self.user_data.lookup_obj(self.drobb_user_id)

        # distance should be approx 2610m
        # midpoint should be circa (40.75827478958617, -73.99310556132602)

        self.matchGrortDrobb = models.Match(self.userGrort, self.userDrobb)
        assert self.matchGrortDrobb.midpoint is not None

        # Get the candidates list that the DatabaseAPI would be giving to Match:
        self.candidate_datespots_list = self.db.get_datespots_near(
            {"location": self.matchGrortDrobb.midpoint})

    def test_hash(self):
        """Does the __hash__() method's return value match the value obtained by mimicking its logic in the test code?"""
        expected_hash = hash((self.grort_user_id, self.drobb_user_id))
        actual_hash = hash(self.matchGrortDrobb)
        self.assertEqual(actual_hash, expected_hash)

    def test_hash_output_consistent_regardless_of_user_order(self):
        """Does Match(Alice, Bob) hash to same value as Match(Bob, Alice)?"""
        expected_hash = hash(self.matchGrortDrobb)
        match_obj_flipped_members = models.Match(
            self.userDrobb, self.userGrort
        )  # Reverse the user1 and user2 roles from those in setUp
        assert match_obj_flipped_members.user1 == self.matchGrortDrobb.user2 and match_obj_flipped_members.user2 == self.matchGrortDrobb.user1
        actual_hash = hash(match_obj_flipped_members)
        self.assertEqual(actual_hash, expected_hash)

        # Test same for the public id property attribute
        expected_id = self.matchGrortDrobb.id
        actual_id = match_obj_flipped_members.id
        self.assertEqual(actual_id, expected_id)

        # TODO The implementation code isn't correct as of 6/10. Observed the same user ID strings producing different Match id hashes
        #   during Postman endpoint testing.

    def test_public_id_attribute_matches_hash(self):
        """Does the public Match.id attribute-property bear the expected relationship to the return value Match.__hash__()?"""
        expected_id = str(hex(hash(self.matchGrortDrobb)))[2:]  # Mimic logic in Match._id() private method
        actual_id = self.matchGrortDrobb.id
        self.assertEqual(actual_id, expected_id)

    def test_compute_midpoint(self):
        maxDelta = 0.01
        approxExpectedMidpoint = (40.75827478958617, -73.99310556132602)
        expectedLat, expectedLon = approxExpectedMidpoint
        actualLat, actualLon = self.matchGrortDrobb.midpoint
        self.assertAlmostEqual(actualLat,
                               expectedLat,
                               delta=expectedLat * maxDelta)
        self.assertAlmostEqual(actualLon,
                               expectedLon,
                               delta=expectedLat * maxDelta)

    def test_public_distance_attribute(self):
        """Does the public distance attribute-property return the expected distance?"""
        expected_distance = geo_utils.haversine(self.grortCurrentLocation,
                                                self.drobbCurrentLocation)
        actual_distance = self.matchGrortDrobb.distance
        self.assertAlmostEqual(actual_distance, expected_distance)

    def test_get_suggestions_return_type(self):
        """Does Match.get_suggestions() external method return the expected type?"""
        expected_return_type = list
        returned_obj = self.matchGrortDrobb.suggestions(
            self.candidate_datespots_list)
        self.assertIsInstance(returned_obj, expected_return_type)

    def test_get_suggestions_return_not_null(self):
        returned_obj = self.matchGrortDrobb.suggestions(
            self.candidate_datespots_list)
        self.assertGreater(len(returned_obj), 0)

    def test_get_suggestions_return_shape(self):
        """Does the returned object's shape (nested lists/tuples) match the expected structure?"""
        returned_obj = self.matchGrortDrobb.suggestions(
            self.candidate_datespots_list)
        # each "suggestion" should be a Datespot object literal:
        for element in returned_obj:
            self.assertIsInstance(element, models.Datespot)

    # def test_db_user_method_returns_expected_query_results(self):
    #     """Does the method that calls the main database API return the expected query results, i.e.
    #     a list of serialized restaurant object dicts sorted by distance?"""
    #     returned_obj = self.matchGrortDrobb._get_datespots_by_geography()
    #     #print(returned_obj)
    #     self.assertIsInstance(returned_obj, list)
    #     self.assertGreater(len(returned_obj), 0)

    def test_internal_scorer_method_returns_expected_data(self):
        """Does the internal "private" method responsible for scoring each nearby datespot return a non-empty
        list?"""
        returned_obj = self.matchGrortDrobb._score_nearby_datespots(
            self.candidate_datespots_list)
        self.assertIsInstance(returned_obj, list)
        self.assertGreater(len(returned_obj), 0)
Example #16
class TestHelloWorldThings(unittest.TestCase):

    def setUp(self):
        # Todo: Very hard to test the model in isolation here, because chats require messages and the
        #   current architecture creates messages directly into a chat. The model itself doesn't persist
        #   any data, so the create_message method can't find the correct chat to update.

        # Same un-DRY boilerplate to configure the testing DB:
        data_map = { # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
            }

        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # make sure all the test-mock JSONs exist and start as "{}":
        for filename in data_map:
            with open(data_map[filename], 'w') as fobj:
                json.dump({}, fobj)
                fobj.seek(0)

        # Mock DB
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        self.message_data = model_interfaces.MessageModelInterface(json_map_filename=TEST_JSON_DB_NAME)
        self.chat_data = model_interfaces.ChatModelInterface(json_map_filename=TEST_JSON_DB_NAME)

        # Mock users

        self.akatosh_name = "Akatosh"
        self.akatosh_location = (40.73517750328247, -74.00683227856715)
        self.akatosh_id = "1"
        akatosh_data = {
            "name": self.akatosh_name,
            "current_location": self.akatosh_location,
            "force_key": self.akatosh_id
        }
        self.db.post_object({"object_model_name": "user", "object_data": akatosh_data}) # Don't need to store the key returned by this, forced it to "1"

        self.stendarr_name = "Stendarr"
        self.stendarr_location = (40.74769591216627, -73.99447266003756)
        self.stendarr_id = "2"
        stendarr_json = {
            "name": self.stendarr_name,
            "current_location": self.stendarr_location,
            "force_key": self.stendarr_id
        }
        self.db.post_object({"object_model_name": "user", "object_data": stendarr_json})

        self.talos_name = "Talos"
        self.talos_location = (40.76346250260515, -73.98013893542904)
        self.talos_id = "3"
        talos_json = {
            "name": self.talos_name,
            "current_location": self.talos_location,
            "force_key": self.talos_id
        }
        self.db.post_object({"object_model_name": "user", "object_data": talos_json})


        # Instantiate chat object with two participants

        # Messages can't be instantiated without a Chat ID
        #   Todo and that chat ID must come from the DB. So must initiate a chat in the DB.

        self.chat_start_time = time.time()
        self.chat_json = {
            "start_time": self.chat_start_time,
            "participant_ids": [self.akatosh_id, self.stendarr_id]
        }

        self.chat_id = self.db.post_object({"object_model_name": "chat", "object_data": self.chat_json})
        

        # Mock messages

        self.first_timestamp = time.time()
        self.first_message_text = "Lord Akatosh lends you his might. When your own strength fails you, trust in the Nine."

        self.first_message_json = {
            "time_sent": self.first_timestamp,
            "sender_id": self.akatosh_id,
            "chat_id": self.chat_id,
            "text": self.first_message_text

        }
        self.first_message_id = self.db.post_object({"object_model_name": "message", "object_data": self.first_message_json})
        self.first_message_obj = self.message_data.lookup_obj(self.first_message_id)
        self.first_message_sentiment = self.first_message_obj.sentiment

        # Second message in same chat:
        self.second_timestamp = time.time()
        self.second_message_text = "K, thanks for letting me know!"
        self.second_message_json = {
            "time_sent": self.second_timestamp,
            "sender_id": self.stendarr_id,
            "chat_id": self.chat_id,
            "text": self.second_message_text
        }
        self.second_message_id = self.db.post_object({"object_model_name": "message", "object_data": self.second_message_json})
        self.second_message_obj = self.message_data.lookup_obj(self.second_message_id)
        self.second_message_sentiment = self.second_message_obj.sentiment

        # create_message should append the message to the chat 

        # Fetch the chat object at the end, to create one with the messages appended
        self.chat_obj = self.chat_data.lookup_obj(self.chat_id)

    def test_init(self):
        self.assertIsInstance(self.chat_obj, models.Chat)
    
    def test_eq(self):
        """Does the custom __eq__() behave as expected?"""
        self.assertTrue(self.chat_obj == self.chat_obj)
    
    def test_message_id_order(self):
        """Are both test messages in the Chat, and is the first message before the second?"""
        self.assertEqual([message.id for message in self.chat_obj.messages], [self.first_message_id, self.second_message_id]) # todo hacky/obfuscating to have the list comp here
    
    def test_average_sentiment(self):
        """Does the average sentiment match the value expected from separate calculation on same values?"""
        self.assertIsNotNone(self.chat_obj.sentiment)
        expected_mean_sentiment = round((self.first_message_sentiment + self.second_message_sentiment) / 2, SENTIMENT_DECIMAL_PLACES)
        self.assertEqual(expected_mean_sentiment, self.chat_obj.sentiment)
Example #17
    def setUp(self):

        # Blank out the test JSON files:
        data_map = {  # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
        }
        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # make sure all the test-mock JSONs exist
        for filename in data_map:
            with open(data_map[filename], 'w') as fobj:
                json.dump({}, fobj)
                fobj.seek(0)

        # Make a mock restaurant
        self.terrezanos_location = (40.72289821341384, -73.97993915779077)
        self.terrezanos_name = "Terrezano's"
        self.terrezanos_traits = [
            "italian", "wine", "pasta", "NOT FROM PIZZA HUT", "authentic",
            "warehouse"
        ]
        self.terrezanos_price_range = 2
        self.terrezanos_hours = [
            [14, 22], [14, 21], [14, 21], [14, 21], [14, 23], [14, 23], [14, 20]
        ]  # ints in [0..23] representing hours, for now

        terrezanos_data = {
            "location": self.terrezanos_location,
            "name": self.terrezanos_name,
            "traits": self.terrezanos_traits,
            "price_range": self.terrezanos_price_range,
            "hours": self.terrezanos_hours,
        }

        # Make mock text
        self.mock_text_positive_relevant = "This was a wonderful place to go on a date. I had the pasta. It was authentic and not from Pizza Hut."
        self.expected_sentiment = 0.1906  # todo hardcoded
        self.expected_relevance = round(
            1 / len(self.mock_text_positive_relevant),
            SENTIMENT_DECIMAL_PLACES)  # i.e. "date" appears once.

        # Connect to the database with the mock data set
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        args_data = {
            "object_model_name": "datespot",
            "object_data": terrezanos_data
        }
        self.terrezanos_id = self.db.post_object(args_data)

        # Instantiate mock Review object

        self.review_obj = models.Review(datespot_id=self.terrezanos_id,
                                        text=self.mock_text_positive_relevant)
Example #18
class DatabaseAPI_test:
    ''' 
    Test class for database_api.py. The functions _make_connection() and _close_connection() 
    are implicitly tested when the other functions are tested. 
    '''

    def set_up(self):
        self.index = DatabaseAPI(config.db_host, config.db_port,
                                 config.db_name, config.db_user, config.db_pass)
        self.passed_tests = 0
        self.failed_tests = 0

    # Makes a table, inserts a few items, and queries the database for those items.
    # Explicitly tests make_table(), upsert() and query() together, in database_API.py.
    # Implicitly tests _make_connection() and _close_connection() in database_API.py.
    def test_routine1(self):
        print("Test 1: ",end='')
        self.index.make_tables('wordfreq', {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
        self.index.upsert(table_name='wordfreq', article_id='test1', values=[('test_word1', 1), ('test_word2', 2)])
        query_data = self.index.query("SELECT articleid, word, frequency FROM wordfreq WHERE word = 'test_word2';")
        if query_data[0][0] == 'test1' and query_data[0][1] == 'test_word2' and query_data[0][2] == 2:
            self.passed_tests += 1
            print('pass')
        else:
            self.failed_tests += 1
            print('failed')

    # More or less the same as test_routine1(), but now also tests remove().
    # Explicitly tests make_table(), upsert(), query() and remove() in database_API.py.
    # Implicitly tests _make_connection() and _close_connection() in database_API.py.
    def test_routine2(self):
        print("Test 2: ", end='')
        self.index.make_tables('wordfreq', {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
        self.index.upsert(table_name='wordfreq', article_id='test2', values=[('test_word', 1)])
        self.index.remove('wordfreq', 'articleid', 'test2')
        query_data = self.index.query("SELECT articleid, word, frequency FROM wordfreq WHERE articleid = 'test2';")
        if query_data == []:
            self.passed_tests += 1
            print('pass')
        else:
            self.failed_tests += 1
            print('failed')

    # Tests if upsert() updates values correctly.
    def test_routine3(self):
        print("Test 3: ", end='')
        self.index.make_tables('wordfreq', {"articleid" : "VARCHAR", "word" : "VARCHAR", "frequency" : "INTEGER"}, "(articleid, word)")
        self.index.upsert(table_name='wordfreq', article_id='test3', values=[('test_word', 1)])
        self.index.upsert(table_name='wordfreq', article_id='test3', values=[('test_word', 5)])
        query_data = self.index.query("SELECT articleid, word, frequency FROM wordfreq WHERE articleid = 'test3';")
        if query_data[0][2] == 5:
            self.passed_tests += 1
            print('pass')
        else:
            self.failed_tests += 1
            print('failed')

    def run_tests(self):
        print('Testing DatabaseAPI:')
        self.set_up()
        self.test_routine1()
        self.test_routine2()
        self.test_routine3()

    def print_results(self):
        print("DatabaseAPI test results:")
        print("Passed", self.passed_tests, "out of", self.passed_tests + self.failed_tests, "tests.")
Exemple #19
0
    def setUp(self):

        # Blank out the test JSON files:
        data_map = {  # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
        }
        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # Make sure all the test-mock JSON files exist and start out empty
        for data_key in data_map:
            with open(data_map[data_key], 'w') as fobj:  # data_map values are the filenames
                json.dump({}, fobj)
                fobj.seek(0)
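
        # Hypothetical helper (not in the real test modules) sketching how the "todo DRY"
        #   note above could be addressed; `reset_mock_json_environment` is an assumed name:
        #
        #   def reset_mock_json_environment(json_map_filename: str, data_map: dict) -> None:
        #       """Write the data map to the map file and blank out every mock JSON file it names."""
        #       with open(json_map_filename, 'w') as fobj:
        #           json.dump(data_map, fobj)
        #       for mock_filename in data_map.values():
        #           with open(mock_filename, 'w') as fobj:
        #               json.dump({}, fobj)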

        # Instantiate DatabaseAPI object and model interfaces
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        # These direct model-interface instances are cumbersome, but no implementation code
        #   actually needs to ask the DB API for a model object; a "get_object()" method on the
        #   DB API would be convenient for the tests but have no other use (see sketch below).
        self.user_data = model_interfaces.UserModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.datespot_data = model_interfaces.DatespotModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.match_data = model_interfaces.MatchModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.review_data = model_interfaces.ReviewModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.chat_data = model_interfaces.ChatModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.message_data = model_interfaces.MessageModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
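
        # Hypothetical sketch of that convenience method (not part of the real DatabaseAPI);
        #   it would only reuse calls that already exist, _model_interface() and lookup_obj():
        #
        #   def get_object(self, object_model_name: str, object_id: str):
        #       """Return the stored model object for the given model name and id."""
        #       return self._model_interface(object_model_name).lookup_obj(object_id)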

        # Data for mock users
        self.azura_name = "Azura"
        self.azura_location = (40.73517750328247, -74.00683227856715)
        self.azura_id = "1"
        self.azura_existing_taste_name = "dawn"
        self.azura_existing_taste_strength = 0.9
        self.azura_existing_taste_datapoints = 3
        self.azura_existing_tastes = {  # This is the data in the object's internal format, for testing convenience not for a call to the external create_user()
            self.azura_existing_taste_name: [
                self.azura_existing_taste_strength,
                self.azura_existing_taste_datapoints
            ]
        }

        self.azura_data = {
            "name": self.azura_name,
            "current_location": self.azura_location,
            "force_key": self.azura_id
        }

        self.boethiah_name = "Boethiah"
        self.boethiah_location = (40.76346250260515, -73.98013893542904)
        self.boethiah_id = "2"
        self.boethiah_data = {
            "name": self.boethiah_name,
            "current_location": self.boethiah_location,
            "force_key": self.boethiah_id
        }

        self.hircine_name = "Hircine"
        self.hircine_location = (40.76525023033338, -73.96722141608099)
        self.hircine_id = "3"
        self.hircine_data = {
            "name": self.hircine_name,
            "current_location": self.hircine_location,
            "force_key": self.hircine_id
        }

        # Data for mock Datespot
        self.terrezanos_location = (40.737291166191476, -74.00704685527774)
        self.terrezanos_name = "Terrezano's"
        self.terrezanos_traits = {  # trait name -> [score, datapoint count], with "discrete" marking binary traits
            "italian": [1.0, "discrete"],
            "wine": [0.5, 1],
            "pasta": [0.6, 2],
            "NOT FROM PIZZA HUT": [0.01, 2],
            "authentic": [-0.05, 3],
            "warehouse": [1.0, "discrete"]
        }
        self.terrezanos_price_range = 2
        self.terrezanos_hours = [
            [14, 22], [14, 21], [14, 21], [14, 21], [14, 23], [14, 23],
            [14, 20]
        ]  # ints in [0..23] representing hours, for now

        self.terrezanos_data = {
            "location": self.terrezanos_location,
            "name": self.terrezanos_name,
            "traits": self.terrezanos_traits,
            "price_range": self.terrezanos_price_range,
            "hours": self.terrezanos_hours,
        }

        self.terrezanos_id = self.db.post_object({
            "object_model_name": "datespot",
            "object_data": self.terrezanos_data
        })

        # Data for mock Review of Terrezano's

        self.mock_text_positive_relevant = "This was a wonderful place to go on a date. I had the pasta. It was authentic and not from Pizza Hut."
        self.expected_sentiment = 0.1906  # todo hardcoded
        self.expected_relevance = round(
            1 / len(self.mock_text_positive_relevant),
            4)  # The keyword "date" appears once, so relevance = 1 / text length
        self.terrezanos_review_data = {
            "datespot_id": self.terrezanos_id,
            "text": self.mock_text_positive_relevant
        }

        # Add three users for use in testing compound objects
        self.db.post_object({
            "object_model_name": "user",
            "object_data": self.azura_data
        })
        self.db.post_object({
            "object_model_name": "user",
            "object_data": self.boethiah_data
        })
        self.db.post_object({
            "object_model_name": "user",
            "object_data": self.hircine_data
        })

        # Data for mock Message and Chat
        self.mock_bilateral_timestamp = time.time()
        self.quick_mock_chat_data = {
            "start_time": time.time(),
            "participant_ids": [self.azura_id, self.boethiah_id]
        }
        self.mock_chat_id_1 = self.db.post_object({
            "object_model_name": "chat",
            "object_data": self.quick_mock_chat_data
        })  # Need a Chat to create a Message
        self.single_sentence_text = "Worship the Nine, do your duty, and heed the commands of the saints and priests."
        self.expected_sentiment_single_sentence = 0.296  # todo hardcoded

        self.mock_bilateral_message_data = {
            "time_sent": self.mock_bilateral_timestamp,
            "sender_id": self.azura_id,
            "chat_id": self.mock_chat_id_1,
            "text": self.single_sentence_text
        }

        # Add two matches for Azura user

        @freeze_time("2021-05-01 12:00:01")
        def freezetime_match_1(match_data: dict) -> str:
            """Creates the match at a specified timestamp to avoid unittest forcing the timestamps to be identical;
            returns the id string."""
            return self.db.post_object(match_data)

        @freeze_time("2021-05-01 12:00:02")  # One second later
        def freezetime_match_2(match_data: dict) -> str:
            """Creates the second match at a later timestamp."""
            return self.db.post_object(match_data)

        match_data_azura_boethiah = {
            "object_model_name": "match",
            "object_data": {
                "user1_id": self.azura_id,
                "user2_id": self.boethiah_id
            }
        }

        match_data_hircine_azura = {
            "object_model_name": "match",
            "object_data": {
                "user1_id": self.hircine_id,
                "user2_id": self.azura_id
            }
        }

        self.match_id_azura_boethiah = freezetime_match_1(
            match_data_azura_boethiah)
        self.match_id_hircine_azura = freezetime_match_2(
            match_data_hircine_azura)

        self.match_obj_azura_boethiah = self.match_data.lookup_obj(
            self.match_id_azura_boethiah)
        self.match_obj_hircine_azura = self.match_data.lookup_obj(
            self.match_id_hircine_azura)
Exemple #20
0
class TestHelloWorldThings(unittest.TestCase):
    """Basic non-brokenness tests."""
    @freeze_time(datetime.datetime.now())
    def setUp(self):

        # Blank out the test JSON files:
        data_map = {  # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
        }
        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # Make sure all the test-mock JSON files exist and start out empty
        for data_key in data_map:
            with open(data_map[data_key], 'w') as fobj:  # data_map values are the filenames
                json.dump({}, fobj)
                fobj.seek(0)

        # Instantiate DatabaseAPI object and model interfaces
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        # These direct model-interface instances are cumbersome, but no implementation code
        #   actually needs to ask the DB API for a model object; a "get_object()" method on the
        #   DB API would be convenient for the tests but have no other use.
        self.user_data = model_interfaces.UserModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.datespot_data = model_interfaces.DatespotModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.match_data = model_interfaces.MatchModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.review_data = model_interfaces.ReviewModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.chat_data = model_interfaces.ChatModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)
        self.message_data = model_interfaces.MessageModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)

        # Data for mock users
        self.azura_name = "Azura"
        self.azura_location = (40.73517750328247, -74.00683227856715)
        self.azura_id = "1"
        self.azura_existing_taste_name = "dawn"
        self.azura_existing_taste_strength = 0.9
        self.azura_existing_taste_datapoints = 3
        self.azura_existing_tastes = {  # This is the data in the object's internal format, for testing convenience not for a call to the external create_user()
            self.azura_existing_taste_name: [
                self.azura_existing_taste_strength,
                self.azura_existing_taste_datapoints
            ]
        }

        self.azura_data = {
            "name": self.azura_name,
            "current_location": self.azura_location,
            "force_key": self.azura_id
        }

        self.boethiah_name = "Boethiah"
        self.boethiah_location = (40.76346250260515, -73.98013893542904)
        self.boethiah_id = "2"
        self.boethiah_data = {
            "name": self.boethiah_name,
            "current_location": self.boethiah_location,
            "force_key": self.boethiah_id
        }

        self.hircine_name = "Hircine"
        self.hircine_location = (40.76525023033338, -73.96722141608099)
        self.hircine_id = "3"
        self.hircine_data = {
            "name": self.hircine_name,
            "current_location": self.hircine_location,
            "force_key": self.hircine_id
        }

        # Data for mock Datespot
        self.terrezanos_location = (40.737291166191476, -74.00704685527774)
        self.terrezanos_name = "Terrezano's"
        self.terrezanos_traits = {  # trait name -> [score, datapoint count], with "discrete" marking binary traits
            "italian": [1.0, "discrete"],
            "wine": [0.5, 1],
            "pasta": [0.6, 2],
            "NOT FROM PIZZA HUT": [0.01, 2],
            "authentic": [-0.05, 3],
            "warehouse": [1.0, "discrete"]
        }
        self.terrezanos_price_range = 2
        self.terrezanos_hours = [
            [14, 22], [14, 21], [14, 21], [14, 21], [14, 23], [14, 23],
            [14, 20]
        ]  # ints in [0..23] representing hours, for now

        self.terrezanos_data = {
            "location": self.terrezanos_location,
            "name": self.terrezanos_name,
            "traits": self.terrezanos_traits,
            "price_range": self.terrezanos_price_range,
            "hours": self.terrezanos_hours,
        }

        self.terrezanos_id = self.db.post_object({
            "object_model_name": "datespot",
            "object_data": self.terrezanos_data
        })

        # Data for mock Review of Terrezano's

        self.mock_text_positive_relevant = "This was a wonderful place to go on a date. I had the pasta. It was authentic and not from Pizza Hut."
        self.expected_sentiment = 0.1906  # todo hardcoded
        self.expected_relevance = round(
            1 / len(self.mock_text_positive_relevant),
            4)  # The keyword "date" appears once, so relevance = 1 / text length
        self.terrezanos_review_data = {
            "datespot_id": self.terrezanos_id,
            "text": self.mock_text_positive_relevant
        }

        # Add three users for use in testing compound objects
        self.db.post_object({
            "object_model_name": "user",
            "object_data": self.azura_data
        })
        self.db.post_object({
            "object_model_name": "user",
            "object_data": self.boethiah_data
        })
        self.db.post_object({
            "object_model_name": "user",
            "object_data": self.hircine_data
        })

        # Data for mock Message and Chat
        self.mock_bilateral_timestamp = time.time()
        self.quick_mock_chat_data = {
            "start_time": time.time(),
            "participant_ids": [self.azura_id, self.boethiah_id]
        }
        self.mock_chat_id_1 = self.db.post_object({
            "object_model_name": "chat",
            "object_data": self.quick_mock_chat_data
        })  # Need a Chat to create a Message
        self.single_sentence_text = "Worship the Nine, do your duty, and heed the commands of the saints and priests."
        self.expected_sentiment_single_sentence = 0.296  # todo hardcoded

        self.mock_bilateral_message_data = {
            "time_sent": self.mock_bilateral_timestamp,
            "sender_id": self.azura_id,
            "chat_id": self.mock_chat_id_1,
            "text": self.single_sentence_text
        }

        # Add two matches for Azura user

        @freeze_time("2021-05-01 12:00:01")
        def freezetime_match_1(match_data: dict) -> str:
            """Creates the match at a specified timestamp to avoid unittest forcing the timestamps to be identical;
            returns the id string."""
            return self.db.post_object(match_data)

        @freeze_time("2021-05-01 12:00:02")  # One second later
        def freezetime_match_2(match_data: dict) -> str:
            """Creates the second match at a later timestamp."""
            return self.db.post_object(match_data)

        match_data_azura_boethiah = {
            "object_model_name": "match",
            "object_data": {
                "user1_id": self.azura_id,
                "user2_id": self.boethiah_id
            }
        }

        match_data_hircine_azura = {
            "object_model_name": "match",
            "object_data": {
                "user1_id": self.hircine_id,
                "user2_id": self.azura_id
            }
        }

        self.match_id_azura_boethiah = freezetime_match_1(
            match_data_azura_boethiah)
        self.match_id_hircine_azura = freezetime_match_2(
            match_data_hircine_azura)

        self.match_obj_azura_boethiah = self.match_data.lookup_obj(
            self.match_id_azura_boethiah)
        self.match_obj_hircine_azura = self.match_data.lookup_obj(
            self.match_id_hircine_azura)

    def test_init(self):
        """Was an object of the expected type instantiated?"""
        self.assertIsInstance(self.db, DatabaseAPI)

    def test_model_interface_constructor_calls(self):
        """Does the DB interface call the expected model interface for each model name?"""
        expected_interfaces = {
            "user": model_interfaces.UserModelInterface,
            "datespot": model_interfaces.DatespotModelInterface,
            "match": model_interfaces.MatchModelInterface,
            "review": model_interfaces.ReviewModelInterface,
            "message": model_interfaces.MessageModelInterface,
            "chat": model_interfaces.ChatModelInterface
        }
        for model_name in expected_interfaces:
            actual_interface = self.db._model_interface(model_name)
            self.assertIsInstance(actual_interface,
                                  expected_interfaces[model_name])

    def test_validate_model_name(self):
        """Does the validator raise the expected error for a bad model name?"""
        with self.assertRaises(ValueError):
            self.db._validate_model_name("foo")

    ### Tests for post_object() and get_object() ###

    def test_post_obj_user(self):
        talos_name = "Talos"
        talos_location = (40.76346250260515, -73.98013893542904)
        expected_talos_id = "4"
        talos_data = {
            "name": talos_name,
            "current_location": talos_location,
            "force_key": expected_talos_id
        }
        actual_talos_id = self.db.post_object({
            "object_model_name": "user",
            "object_data": talos_data
        })
        self.assertIsInstance(actual_talos_id, str)
        talos_obj = self.user_data.lookup_obj(actual_talos_id)
        self.assertIsInstance(talos_obj, models.User)
        self.assertEqual(expected_talos_id, actual_talos_id)

        # Trying with a key already in the DB should raise an error
        with self.assertRaises(ValueError):
            talos_data["force_key"] = self.hircine_id  # Used in setUp
            actual_talos_id = self.db.post_object({
                "object_model_name": "user",
                "object_data": talos_data
            })

    def test_post_obj_datespot(self):
        domenicos_location = (40.723889184134926, -73.97613846772394)
        domenicos_name = "Domenico's"
        domenicos_traits = {
            # todo ...So we're imagining this as ~ how good the coffee is, rather than the discrete fact that they do serve coffee?
            "coffee": [1.0, 1],
            "coffee shop": [1.0, "discrete"],
            "gourmet": [0.25, 1],
            "americano": [0.15, 1],
            "knows coffee": [0.3, 1],
            "bricks": [0.6, 1],
            "burger juice": [0.9, 1]
        }
        domenicos_price_range = 1
        domenicos_hours = [[8, 19], [8, 19], [8, 19], [8, 19], [8, 19],
                           [8, 19], [10, 17]]

        domenicos_data = {
            "location": domenicos_location,
            "name": domenicos_name,
            "traits": domenicos_traits,
            "price_range": domenicos_price_range,
            "hours": domenicos_hours
        }

        domenicos_id = self.db.post_object({
            "object_model_name": "datespot",
            "object_data": domenicos_data
        })
        domenicos_obj = self.datespot_data.lookup_obj(domenicos_id)

        self.assertIsInstance(domenicos_obj, models.Datespot)

    def test_post_obj_match(self):
        match_data = {"user1_id": self.azura_id, "user2_id": self.boethiah_id}
        match_id = self.db.post_object({
            "object_model_name": "match",
            "object_data": match_data
        })
        match_obj = self.match_data.lookup_obj(match_id)
        self.assertIsInstance(match_obj, models.Match)

    def test_post_obj_review(self):
        review_id = self.db.post_object({
            "object_model_name": "review",
            "object_data": self.terrezanos_review_data
        })
        review_obj = self.review_data.lookup_obj(review_id)
        self.assertIsInstance(review_obj, models.Review)

    def test_post_obj_message(self):
        message_id = self.db.post_object({
            "object_model_name": "message",
            "object_data": self.mock_bilateral_message_data
        })
        message_obj = self.message_data.lookup_obj(message_id)
        self.assertIsInstance(message_obj, models.Message)

    def test_post_obj_chat(self):
        chat_id = self.db.post_object({
            "object_model_name": "chat",
            "object_data": self.quick_mock_chat_data
        })
        chat_obj = self.chat_data.lookup_obj(chat_id)
        self.assertIsInstance(chat_obj, models.Chat)

    ### Tests for put_json() ###

    def test_put_json_update_user(self):

        # Test updating a User's location:
        new_data = {
            "current_location": (40.737291166191476, -74.00704685527774),
        }
        args_data = {
            "object_model_name": "user",
            "object_id": self.azura_id,
            "update_data": new_data
        }
        self.db.put_data(args_data)
        azura_obj = self.user_data.lookup_obj(self.azura_id)
        self.assertAlmostEqual(new_data["current_location"],
                               azura_obj.current_location)

    def test_updating_unsupported_model_raises_error(self):
        """Does attempting to update a model for which updates aren't supported raise the 
        expected error?"""
        unsupported_models = ["review", "message"]
        arbitrary_object_id = "a"
        arbitrary_data = {"foo": "bar"}
        for model in unsupported_models:
            with self.assertRaises(ValueError):
                self.db.put_data({
                    "object_model_name": model,
                    "object_id": arbitrary_object_id,
                    "update_data": arbitrary_data
                })

    # TODO complete for other models and their main anticipated update cases

    ### Tests for post_decision() ###
    def test_post_yes_decision_no_match(self):
        """Does posting a "yes" decision that doesn't create a match return the expected JSON?"""
        decision_yes_data = {
            "user_id": self.azura_id,
            "candidate_id": self.boethiah_id,
            "outcome": True
        }
        expected_response_data = {"match_created": False}
        actual_response_data = self.db.post_decision(decision_yes_data)
        self.assertEqual(expected_response_data, actual_response_data)

    def test_post_yes_decision_yes_match(self):
        """Does posting a "yes" decision that creates a match return the expected JSON?"""
        # Post a decision of Azura liking Boethiah:
        azura_decision_yes_data = {
            "user_id": self.azura_id,
            "candidate_id": self.boethiah_id,
            "outcome": True
        }
        self.db.post_decision(azura_decision_yes_data)

        # Post a second decision of Boethiah liking Azura
        boethiah_decision_yes_data = {
            "user_id": self.boethiah_id,
            "candidate_id": self.azura_id,
            "outcome": True
        }

        expected_response_data = {"match_created": True}

        actual_response_data = self.db.post_decision(
            boethiah_decision_yes_data)
        self.assertEqual(expected_response_data, actual_response_data)

    def test_post_no_decision(self):
        """Does posting a "no" decision return the expected JSON?"""
        decision_no_data = {
            "user_id": self.azura_id,
            "candidate_id": self.boethiah_id,
            "outcome": False
        }
        expected_response_data = {"match_created": False}
        actual_response_data = self.db.post_decision(decision_no_data)
        self.assertEqual(expected_response_data, actual_response_data)

    def test_non_boolean_outcome_raises_error(self):
        """Does posting JSON without a boolean outcome raise the expected error?"""
        bad_data = {
            "user_id": self.azura_id,
            "candidate_id": self.boethiah_id,
            "outcome": 2
        }
        with self.assertRaises(TypeError):
            self.db.post_decision(bad_data)

    ### Tests for get_datespots_near() ###

    # Code that makes live API calls isn't covered by the main test suite

    def test_get_datespots_near_cache_only_default_radius(self):
        """Does the method return a string matching the expected shape of a JSON-ified list of Datespots
        in response to a query that provides valid location but no radius?"""
        location_query_data = {
            "location": (40.737291166191476, -74.00704685527774)
        }
        results = self.db.get_datespots_near(location_query_data)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
        first_result = results[0]
        distance, datespot = first_result  # it's a two-element tuple
        self.assertIsInstance(distance, float)
        self.assertIsInstance(datespot, models.Datespot)

    def test_get_datespots_near_cache_only_nondefault_radius(self):
        """Does the method return the expected JSON in response to a query that provides valid location
        and specifies a non-default_radius?"""
        location_query_data = {
            "location": (40.737291166191476, -74.00704685527774),
            "radius": 4000
        }
        results = self.db.get_datespots_near(location_query_data)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
        first_result = results[0]
        distance, datespot = first_result  # it's a two-element tuple
        self.assertIsInstance(distance, float)
        self.assertIsInstance(datespot, models.Datespot)

    ### Tests for get_datespot_suggestions() ###

    def test_get_candidate_datespots(self):
        """Does the method return the expected JSON in response to JSON matching with a valid Match?"""

        # Put a Match in the mock DB
        match_data = {"user1_id": self.azura_id, "user2_id": self.boethiah_id}
        match_id = self.db.post_object({
            "object_model_name": "match",
            "object_data": match_data
        })
        match_obj = self.match_data.lookup_obj(match_id)

        query_data = {"match_id": match_id}

        results = self.db.get_candidate_datespots(query_data)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
        # Terrezanos should be the only Datespot known to the DB here:
        self.assertEqual(results[0][1].id, self.terrezanos_id)

    ### Tests for other public methods ###
    def test_get_next_candidate(self):
        # Other Users exist in the DB, so one of them will be returned as Azura's next candidate
        query_data = {"user_id": self.azura_id}
        result = self.db.get_next_candidate(query_data)
        candidate_name = result["name"]
        self.assertEqual(self.boethiah_name, candidate_name)

    # TODO Post / get obj / get json for all of:
    #     user
    #     datespot
    #     match
    #     review
    #     message
    #     chat

    def test_get_matches_list(self):

        expected_result_data = [
            {  # Expect the Match created second to appear first in the list
                "match_id": self.match_id_hircine_azura,
                "match_timestamp": self.match_obj_hircine_azura.timestamp,
                "match_partner_info": self.user_data.render_candidate(self.hircine_id)
            },
            {
                "match_id": self.match_id_azura_boethiah,
                "match_timestamp": self.match_obj_azura_boethiah.timestamp,
                "match_partner_info": self.user_data.render_candidate(self.boethiah_id)
            }
        ]
        actual_result_data = self.db.get_matches_list(
            query_data={"user_id": self.azura_id})

        #self.assertEqual(actual_result_data, expected_result_data)
        # TODO Had to give up for now on asserting about the timestamps due to weird behavior--keep getting them created with identical timestamps
        #   when created in setUp, even though the timestamps increment in match.py.  Unittests for match.py confirmed that the underlying sort works.

        for result in actual_result_data:
            self.assertIsInstance(result, dict)
            self.assertEqual(len(result), len(expected_result_data[0]))

    def test_get_suggestions(self):

        expected_result_data = [  # Terrezanos is the only Datespot in the DB here
            self.datespot_data.render_obj(self.terrezanos_id)
        ]
        actual_result_data = self.db.get_suggestions_list(
            {"match_id": self.match_id_azura_boethiah})
        self.assertEqual(actual_result_data, expected_result_data)
Exemple #21
0
class TestHelloWorldThings(unittest.TestCase):
    def setUp(self):

        # Boilerplate mock data environment stuff
        data_map = {  # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
        }

        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # Make sure all the test-mock JSON files exist and start out empty:
        for data_key in data_map:
            with open(data_map[data_key], 'w') as fobj:  # data_map values are the filenames
                json.dump({}, fobj)
                fobj.seek(0)

        # Mock DB
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        self.user_data = model_interfaces.UserModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)

        # Make three mock users

        self.akatosh_name = "Akatosh"
        self.akatosh_location = (40.73517750328247, -74.00683227856715)
        self.akatosh_id = "1"
        akatosh_json = {
            "name": self.akatosh_name,
            "current_location": self.akatosh_location,
            "force_key": self.akatosh_id
        }
        self.db.post_object({
            "object_model_name": "user",
            "object_data": akatosh_json
        })  # Don't need to store the key returned by this, forced it to "1"

        self.stendarr_name = "Stendarr"
        self.stendarr_location = (40.74769591216627, -73.99447266003756)
        self.stendarr_id = "2"
        stendarr_json = {
            "name": self.stendarr_name,
            "current_location": self.stendarr_location,
            "force_key": self.stendarr_id
        }
        self.db.post_object({
            "object_model_name": "user",
            "object_data": stendarr_json
        })

        self.talos_name = "Talos"
        self.talos_location = (40.76346250260515, -73.98013893542904)
        self.talos_id = "3"
        talos_json = {
            "name": self.talos_name,
            "current_location": self.talos_location,
            "force_key": self.talos_id
        }
        self.db.post_object({
            "object_model_name": "user",
            "object_data": talos_json
        })

        # Instantiate mock simple bilateral message (one recipient)

        self.mock_bilateral_timestamp = time.time()
        self.single_sentence_text = "Worship the Nine, do your duty, and heed the commands of the saints and priests."
        self.expected_sentiment_single_sentence = 0.296  # todo hardcoded

        self.mock_chat_id_1 = "1a"
        self.message_obj = models.Message(
            time_sent=self.mock_bilateral_timestamp,
            sender=self.user_data.lookup_obj(self.akatosh_id),
            chat_id=self.mock_chat_id_1,
            text=self.single_sentence_text)

        # Instantiate mock multi-sentence message.
        self.multisentence_text = "I'm Akatosh blah blah blah. Lord Akatosh lends you his might. When your own strength fails you, trust in the Nine."
        self.expected_sentiment_multisentence = 0.092  # todo hardcoded

        self.mock_chat_id_2 = "2a"
        self.multisentence_message_obj = models.Message(
            time_sent=time.time(),
            sender=self.user_data.lookup_obj(self.akatosh_id),
            chat_id=self.mock_chat_id_2,
            text=self.multisentence_text)

    def test_init(self):
        self.assertIsInstance(self.message_obj, models.Message)

    def test_eq(self):
        """Does the custom __eq__() behave as expected?"""
        self.assertTrue(self.message_obj == self.message_obj)
        self.assertFalse(self.message_obj == self.multisentence_message_obj)

    def test_hash(self):
        """Does the integer returned by __hash__ match the results of mimicing the same hashing steps
        manually?"""
        expected_hash = hash(
            str(self.mock_bilateral_timestamp) + self.akatosh_id)
        self.assertEqual(expected_hash, hash(self.message_obj))

    def test_str(self):
        """Does the __str__() method return the expected string for a known message?"""
        expected_string = f"{self.message_obj.time_sent}:\t{self.message_obj.sender.id}:\t{self.message_obj.text}"
        actual_string = str(self.message_obj)
        self.assertEqual(expected_string, actual_string)
        self.assertEqual(
            expected_string, self.message_obj.__str__()
        )  # Todo: calling __str__ directly because its single line wasn't registering as covered

    def test_tokenize(self):
        """Does _tokenize split a multisentence text into the expected sentences?"""
        expected_sentences = [
            "I'm Akatosh blah blah blah.", "Lord Akatosh lends you his might.",
            "When your own strength fails you, trust in the Nine."
        ]
        self.multisentence_message_obj._tokenize()
        for i in range(len(expected_sentences)):
            self.assertEqual(expected_sentences[i],
                             self.multisentence_message_obj._sentences[i])

        # Todo test other sentence-ending punctuation.

    def test_analyze_sentiment(self):
        """Does the sentiment match the hardcoded expected sentiment?"""
        self.message_obj._analyze_sentiment()
        self.assertAlmostEqual(self.expected_sentiment_single_sentence,
                               self.message_obj._sentiment_avg)
        self.multisentence_message_obj._analyze_sentiment()
        self.assertAlmostEqual(self.expected_sentiment_multisentence,
                               self.multisentence_message_obj._sentiment_avg)

    def test_str(self):
        """Does the __str__ method return the expected string?"""
        expected_string = f"{self.mock_bilateral_timestamp}:\t{self.akatosh_id}:\t{self.single_sentence_text}"
        self.assertEqual(expected_string, str(self.message_obj))
Exemple #22
0
    def setUp(self):

        # Boilerplate mock data environment stuff
        data_map = {  # todo DRY, this is repeated in every model interface's tests module
            "user_data": "test/testing_mockUserDB.json",
            "datespot_data": "test/testing_mockDatespotDB.json",
            "match_data": "test/testing_mockMatchData.json",
            "review_data": "test/testing_mockReviewData.json",
            "message_data": "test/testing_mockMessageData.json",
            "chat_data": "test/testing_mockChatData.json"
        }

        with open(TEST_JSON_DB_NAME, 'w') as fobj:
            json.dump(data_map, fobj)
            fobj.seek(0)

        # Make sure all the test-mock JSON files exist and start out empty:
        for data_key in data_map:
            with open(data_map[data_key], 'w') as fobj:  # data_map values are the filenames
                json.dump({}, fobj)
                fobj.seek(0)

        # Mock DB
        self.db = DatabaseAPI(json_map_filename=TEST_JSON_DB_NAME)
        self.user_data = model_interfaces.UserModelInterface(
            json_map_filename=TEST_JSON_DB_NAME)

        # Make three mock users

        self.akatosh_name = "Akatosh"
        self.akatosh_location = (40.73517750328247, -74.00683227856715)
        self.akatosh_id = "1"
        akatosh_json = {
            "name": self.akatosh_name,
            "current_location": self.akatosh_location,
            "force_key": self.akatosh_id
        }
        self.db.post_object({
            "object_model_name": "user",
            "object_data": akatosh_json
        })  # Don't need to store the key returned by this, forced it to "1"

        self.stendarr_name = "Stendarr"
        self.stendarr_location = (40.74769591216627, -73.99447266003756)
        self.stendarr_id = "2"
        stendarr_json = {
            "name": self.stendarr_name,
            "current_location": self.stendarr_location,
            "force_key": self.stendarr_id
        }
        self.db.post_object({
            "object_model_name": "user",
            "object_data": stendarr_json
        })

        self.talos_name = "Talos"
        self.talos_location = (40.76346250260515, -73.98013893542904)
        self.talos_id = "3"
        talos_json = {
            "name": self.talos_name,
            "current_location": self.talos_location,
            "force_key": self.talos_id
        }
        self.db.post_object({
            "object_model_name": "user",
            "object_data": talos_json
        })

        # Instantiate mock simple bilateral message (one recipient)

        self.mock_bilateral_timestamp = time.time()
        self.single_sentence_text = "Worship the Nine, do your duty, and heed the commands of the saints and priests."
        self.expected_sentiment_single_sentence = 0.296  # todo hardcoded

        self.mock_chat_id_1 = "1a"
        self.message_obj = models.Message(
            time_sent=self.mock_bilateral_timestamp,
            sender=self.user_data.lookup_obj(self.akatosh_id),
            chat_id=self.mock_chat_id_1,
            text=self.single_sentence_text)

        # Instantiate mock multi-sentence message.
        self.multisentence_text = "I'm Akatosh blah blah blah. Lord Akatosh lends you his might. When your own strength fails you, trust in the Nine."
        self.expected_sentiment_multisentence = 0.092  # todo hardcoded

        self.mock_chat_id_2 = "2a"
        self.multisentence_message_obj = models.Message(
            time_sent=time.time(),
            sender=self.user_data.lookup_obj(self.akatosh_id),
            chat_id=self.mock_chat_id_2,
            text=self.multisentence_text)