def run_scenario(self):

        # Run tests.
        self.assertTrue(scenario_def['tests'], "tests cannot be empty")
        for test in scenario_def['tests']:
            self.init_db(scenario_def['data'], test)

            # Run GridFs Operation.
            operation = self.str_to_cmd[test['act']['operation']]
            args = test['act']['arguments']
            extra_opts = args.pop("options", {})
            if "contentType" in extra_opts:
                extra_opts["metadata"] = {
                    "contentType": extra_opts.pop("contentType")}

            args.update(extra_opts)

            converted_args = dict((camel_to_snake(c), v)
                                  for c, v in args.items())

            # result must exist even when the operation raises
            result = error = None
            try:
                result = operation(**converted_args)

                if 'download' in test['act']['operation']:
                    result = Binary(result.read())
            except Exception as exc:
                error = exc

            self.init_expected_db(test, result)

            # Asserts.
            errors = {"FileNotFound": NoFile,
                      "ChunkIsMissing": CorruptGridFile,
                      "ExtraChunk": CorruptGridFile,
                      "ChunkIsWrongSize": CorruptGridFile,
                      "RevisionNotFound": NoFile}

            if test['assert'].get("error", False):
                self.assertIsNotNone(error)
                self.assertIsInstance(error,
                                      errors[test['assert']['error']],
                                      test['description'])
            else:
                self.assertIsNone(error)

            if 'result' in test['assert']:
                if test['assert']['result'] == 'void':
                    test['assert']['result'] = None
                self.assertEqual(result, test['assert'].get('result'))

            if 'data' in test['assert']:
                # Create alphabetized list
                self.assertEqual(
                    set(self.sorted_list(self.db.fs.chunks, True)),
                    set(self.sorted_list(self.db.expected.chunks, True)))

                self.assertEqual(
                    set(self.sorted_list(self.db.fs.files, False)),
                    set(self.sorted_list(self.db.expected.files, False)))
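The runner above assumes a camel_to_snake helper that turns the spec files' lowerCamelCase argument names into Python keyword arguments; a minimal sketch of such a helper, assuming plain lowerCamelCase inputs:

import re

def camel_to_snake(camel):
    # insert an underscore before each capital, then lowercase,
    # e.g. "contentType" -> "content_type"
    return re.sub(r'([A-Z])', r'_\1', camel).lower()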
Example #2
    def __new__(cls, value):
        # return None and Binary() unmolested
        if value is None or isinstance(value, Binary):
            return value

        # bytearray is marked as MONGO_BLOB_BYTES
        if isinstance(value, bytearray):
            return Binary.__new__(cls, bytes(value), MongoBlob.MONGO_BLOB_BYTES)

        # return non-strings as Binary(), eg: PY3 bytes()
        if not isinstance(value, basestring):
            return Binary(value)

        # if string is encodable as UTF-8, then return as string
        try:
            value.encode('utf-8')
            return value
        except UnicodeError:
            # string which can not be UTF-8 encoded, eg: pickle strings
            return Binary.__new__(cls, value, MongoBlob.MONGO_BLOB_NON_UTF8_STR)
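The MongoBlob subtype constants referenced above are not shown; a plausible surrounding class, assuming user-defined BSON binary subtypes (BSON reserves 0x80-0xFF for those) — the exact values here are an assumption:

from bson import Binary

class MongoBlob(Binary):
    # hypothetical subtype codes in the user-defined BSON range
    MONGO_BLOB_BYTES = 0x80
    MONGO_BLOB_NON_UTF8_STR = 0x81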
Example #3
    def test_validation_succeeds(self):
        """Ensure that valid values can be assigned to binary fields.
        """
        class AttachmentRequired(Document):
            blob = BinaryField(required=True)

        class AttachmentSizeLimit(Document):
            blob = BinaryField(max_bytes=4)

        attachment_required = AttachmentRequired()
        with pytest.raises(ValidationError):
            attachment_required.validate()
        attachment_required.blob = Binary(six.b("\xe6\x00\xc4\xff\x07"))
        attachment_required.validate()

        _5_BYTES = six.b("\xe6\x00\xc4\xff\x07")
        _4_BYTES = six.b("\xe6\x00\xc4\xff")
        with pytest.raises(ValidationError):
            AttachmentSizeLimit(blob=_5_BYTES).validate()
        AttachmentSizeLimit(blob=_4_BYTES).validate()
Example #4
def get_image_document(image_url):
    from bson import Binary
    if len(image_url) == 0:
        return None
    try:
        response = requests.get(image_url, timeout=1)
        if response.status_code == 200:
            result = {}
            image_data = Binary(response.content)
            img_name = image_url.split('/')[-1]
            result["image_name"] = img_name
            result["image_data"] = image_data
            result["image_url"] = image_url
            return result

    except Exception:
        logging.exception(
            "Exception in downloading image from url : {}".format(image_url))

    return None
Example #5
 def post(self, user_id, job_id):
     user_id = str(user_id)
     if user_id == current_identity._get_current_object().id:
         f = request.files.get('imagefile')
         if not f:
             abort(401)
         if f.filename == '':
             abort(401)
         content_type = f.content_type
         # with tempfile.NamedTemporaryFile() as fp:
         #     f.save(fp.name)
         #     fp.seek(0, 0)
         image_data = Binary(f.stream.read())
         update_result = self.jobsDAO.update_images(user_id, job_id,
                                                    image_data, f.filename,
                                                    content_type)
         if update_result:
             result = self.jobsDAO.find_job(user_id, job_id)
             return jsonify(results=result.__dict__)
         else:
             abort(401)
Example #6
 def storeObj(self,
              pickleobj,
              zone,
              acc,
              preobj,
              pca,
              name=None):
     # build the default name at call time; a def-time default would be
     # frozen at import
     if name is None:
         now = datetime.datetime.now()
         name = '{0}/{1}/{2}{3}'.format(now.day, now.month, now.year, now.hour)
     db = self.client.picklestore
     col = db['zone' + str(zone)]
     dic = {
         '_id': name,
         'obj': Binary(pickleobj),
         'accuracy': acc,
         'preprocessing': preobj,
         'zone': zone,
         'PCA': pca
     }
     col.insert_one(dic)
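A matching read path would look the document up by its name key; a sketch under the same picklestore layout (loadObj is a hypothetical name):

 def loadObj(self, zone, name):
     col = self.client.picklestore['zone' + str(zone)]
     # doc['obj'] is a bytes-like Binary; pickle.loads(doc['obj']) rebuilds the object
     return col.find_one({'_id': name})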
Example #7
def save_image(parsed, index: str) -> Binary:
    cf = CanvasFrame()
    tc = TreeWidget(cf.canvas(), parsed, xspace=40, yspace=40)
    tc['node_font'] = 'arial 20 bold'
    tc['leaf_font'] = 'arial 20 bold'
    tc['node_color'] = '#005990'
    tc['leaf_color'] = '#3F8F57'
    tc['line_color'] = '#175252'
    cf.add_widget(tc, 50, 50)

    cf.print_to_file(os.path.join(trees_path, f'tree_{index}.ps'))
    cf.destroy()

    os.system(f'convert {trees_path}/tree_{index}.ps '
              f'{trees_path}/tree_{index}.png')
    os.remove(f'{trees_path}/tree_{index}.ps')

    with open(f'{trees_path}/tree_{index}.png', 'rb') as tree_img:
        tree_bin = Binary(tree_img.read())

    return tree_bin
Example #8
 def run(self):
     while True:
         current_url = redis_client.lpop('m_sohu_task')
         while not current_url:
             current_url = redis_client.lpop('m_sohu_task')
         self.spider.status = SpiderStatus.WORKING
         current_url = current_url.decode('utf-8')
         if not redis_client.sismember('visited_urls', current_url):
             redis_client.sadd('visited_urls', current_url)
             html_page = self.spider.fetch(current_url)
             if html_page not in [None, '']:
                 hasher = hasher_proto.copy()
                 hasher.update(current_url.encode('utf-8'))
                 doc_id = hasher.hexdigest()
                 if not sohu_data_coll.find_one({'_id': doc_id}):
                     sohu_data_coll.insert_one({
                         '_id': doc_id,
                         'url': current_url,
                         'page': Binary(zlib.compress(pickle.dumps(html_page)))
                     })
                 self.spider.parse(html_page)
         self.spider.status = SpiderStatus.IDLE
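Pages are stored as zlib-compressed pickles, so reading one back reverses the pipeline; a minimal sketch against the same sohu_data_coll collection (load_page is a hypothetical name):

import pickle
import zlib

def load_page(doc_id):
    # Binary subclasses bytes, so zlib can decompress the stored value directly
    doc = sohu_data_coll.find_one({'_id': doc_id})
    return pickle.loads(zlib.decompress(doc['page'])) if doc else None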
Example #9
def setupPythonDB(sdf):
    """
    Inputs documents from SDF file into a Python set.
    :param sdf: path to an SDF file.
    :return: The resulting set.
    """
    data = []
    number_added = 0
    for rdmol in Chem.ForwardSDMolSupplier(sdf):
        if rdmol is None:
            continue
        hash_fn = utils.HASH_FUNCTIONS['inchikey']
        document = {
            'index': hash_fn(rdmol),
            'smiles': Chem.MolToSmiles(rdmol),
            'rdmol': Binary(rdmol.ToBinary()),
            'registration_setting': 'standard_setting'
        }
        data.append(document)
        number_added += 1
    print("{} molecules successfully imported".format(number_added))
    return data
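Because Binary behaves like bytes, the rdmol field can be rehydrated straight into an RDKit molecule; a sketch, with restore_molecule as a hypothetical helper name:

from rdkit import Chem

def restore_molecule(document):
    # Chem.Mol accepts the blob produced by Mol.ToBinary()
    return Chem.Mol(bytes(document['rdmol']))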
Example #10
def run(audio_path: str = 'test.wav',
        min_length: int = 1000,
        db_threshold: int = -32,
        speech_api: str = 'sphinx') -> list:
    text = speech_to_text(audio_path,
                          min_length=min_length,
                          db_threshold=db_threshold,
                          speech_api=speech_api)
    print(f'Speech to text: {" ".join(t for t, a in text)}\n')

    for text_audio in text:
        results = dict()

        sentence, chunk = text_audio
        index = chunk.split('_')[1].split('.')[0]
        with open(chunk, 'rb') as chunk_bin:
            results['audio_bin'] = Binary(chunk_bin.read())

        results.update(parse(sentence, index=index))

        results['created'] = datetime.now()
        db.texts.insert_one(results)
Example #11
def documents():
    form = UploadForm(method='POST')
    user_id = session.get('user_id')
#    with open('file.pdf', 'wb+') as f:
#        cursor = db.documents.find()
#        k = 0
#        for i in cursor:
#            if k == 2:
#                f.write(i['file'])
#            k += 1
    if form.submit.data and form.validate_on_submit():
        print(form.doc_type.data)
        filename = secure_filename(form.file.data.filename)
        doc_type = form.doc_type.data
        bytes_file = form.file.data.read()
        dir_path = os.path.join(os.getcwd(), "static", "client", user_id)
        if not os.path.exists(dir_path):
            # makedirs also creates static/client if they are missing
            os.makedirs(dir_path)
        with open(os.path.join(dir_path, doc_type + '.pdf'), 'wb+') as f:
            f.write(bytes_file)

        new_doc_for_mongo = {
            'user_id' : ObjectId(user_id),
            'filename' : filename,
            'doc_type' : doc_type,
            'file' : Binary(bytes_file)
            }
        db.documents.delete_many({'user_id': ObjectId(user_id), 'doc_type': doc_type})
        db.documents.insert_one(new_doc_for_mongo)
        form.file.data = ''
        return redirect(url_for('documents'))
    documents = list(db.documents.find({'user_id' : ObjectId(user_id)}))

    return render_template('documents.html', form=form, documents=documents)
Example #12
    def store_submission_attachments(self, course_id, submission, attachments):
        course_id = str(course_id)
        assignment_id = str(submission['assignment_id'])

        submissions_collection = self.client[course_id][assignment_id]
        query = {'user_id': submission['user_id']}

        existing_submission = submissions_collection.find_one(query)

        att_dict = {}
        count = 0

        # keys on mongo can't have . or some special characters, so flatten out a bit
        for (k, v) in attachments.items():
            att_dict[str(count)] = {}
            att_dict[str(count)]['filename'] = k
            # att_dict[str(count)]['contents'] = content_decode(v)
            att_dict[str(count)]['contents'] = Binary(v)
            count += 1

        existing_submission['attachment-files'] = att_dict
        existing_submission['assignment_id'] = submission['assignment_id']

        submissions_collection.replace_one(query, existing_submission)
Example #13
	def POST(self):
		# Invoke the service worker code
		try:
			db = self.client.dbgenerals

			def getNextSequenceValue(sequenceName):
				sequenceDocument = db.counters.find_and_modify(
					query={'_id': sequenceName},
					update={"$inc": {'sequence_value': 1}},
					new=True,
					upsert=False
				)
				return sequenceDocument['sequence_value']

			rawData = cherrypy.request.json

			document = {
				"_id": getNextSequenceValue("genid"),
				"first_name": rawData['first_name'],
				"last_name": rawData['last_name'],
				"state": rawData['state'],
				"country": rawData['country'],
				"bio": rawData['bio'],
				"picture": Binary(base64.b64decode(rawData['picture'])),
			}

			db.igenerals.insert(document)

			cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
			response = {'res': 1}
			return response

		except Exception as e:
			print("Mongo Error: %s" % str(e))
			response = {'res': 0}
			return response
Example #14
 def _encrypt(self, data):
     return Binary(aes_encrypt(self.key_list[0], data))
Example #15
 def setCurrentObj(self, obj, zone, name, preobj, pca, acc):
     db = self.client.picklestore
     col = db['currentWrkObj']
     col.update_one(
         {'_id': zone},
         {'$set': {'_id': zone, 'obj': Binary(obj), 'name': name,
                   'preprocessing': preobj, 'PCA': pca, 'accuracy': acc}},
         upsert=True)
Example #16
 def direction_in_database(self, collection, direction_hash):
     search_val = {"directions_hash": Binary(data=direction_hash)}
     cursor = self.search_collection(collection,
                                     search_val=search_val,
                                     limit=1)
     return cursor.count() > 0
Example #17
def get_symbol_alive_shas(symbol, versions_coll):
    return set(
        Binary(x) for x in versions_coll.distinct(FW_POINTERS_REFS_KEY,
                                                  {'symbol': symbol}))
Example #18
    def put(self):
        JWT_userId = get_jwt_identity()
        JWT_token = get_raw_jwt()
        #read mfcc features from resource
        args = parser.parse_args()
        #mfcc = np.array(args['mfcc'])
        #basic sanity check
        if len(args['mfcc']) < 1 or len(args['energy']) < 1 or len(
                args['mfcc']) != len(args['energy']):
            return {
                'error': 'data malformed - check the API specification'
            }, 400

        db_result = userData.find_one({'userId': JWT_userId}, {
            "_id": 1,
            "trainLog": 1
        })
        if db_result is None:
            return {'error': 'this is unexpected, can not find user data'}, 500

        #get trainLog and add new data
        if 'trainLog' in db_result:
            train_log = db_result['trainLog']
        else:
            train_log = []
        train_log_item = {
            'timestamp': time(),
            'session': JWT_token['jti'],
            'data': request.json
        }
        train_log.append(train_log_item.copy())
        #trim if log too big
        if len(train_log) > TRAINLOGSIZE:
            train_log.pop(0)

        feature_data = extract_features(train_log)

        #calculate training progress
        api_training_progress = int(
            100 * len(train_log) /
            TRAINEDSIZE)  # fully trained when TRAINEDSIZE digits recorded

        #build summary trainlog returned in api
        train_log_summary = []
        train_log_summary_item = {}
        for log_entry in train_log:
            train_log_summary_item['datetime'] = str(
                datetime.datetime.fromtimestamp(
                    log_entry['timestamp']).isoformat())
            train_log_summary_item['digit'] = log_entry['data']['digit']
            train_log_summary.append(train_log_summary_item.copy())

        # create model
        gmm = mixture.GaussianMixture(N_GMMCOMPONENTS, covariance_type='full')

        # TODO/check
        #prune mfcc file if too large
        #fit (warm start may help)

        tTime = time()
        gmm.fit(feature_data['train_data'])
        tTime = time() - tTime

        #score
        if len(feature_data['test_data']):
            testResult = gmm.score_samples(feature_data['test_data'])
            scoreThreshold = np.average(testResult) - np.std(testResult)
            resultReference = round(
                100 * len(testResult[testResult > scoreThreshold]) /
                len(testResult))  # % of matched frames in test data set
        else:
            scoreThreshold = 0
            resultReference = 0

        #Update db - gmm pickle / cache stats / train_log
        userData.update_one({'userId': JWT_userId}, {
            '$set': {
                'gmmPickleStore': Binary(pickle.dumps(gmm)),
                'resultReference': resultReference,
                'scoreThreshold': scoreThreshold,
                'trainDataLength': len(feature_data['train_data']),
                'testDataLength': len(feature_data['test_data']),
                'trainProgress': api_training_progress,
                'trainLog': train_log
            }
        })

        reply = {
            'training_progress': api_training_progress,
            'train_data_length': len(feature_data['train_data']),
            'test_data_length': len(feature_data['test_data']),
            'training_time': tTime,
            'training_log': train_log_summary[::-1],
            'score_threshold': scoreThreshold,
            'result_reference': resultReference
        }
        return reply, 200
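The stored model comes back with pickle.loads; a minimal read sketch against the same userData collection (load_user_model is a hypothetical name, and unpickling is only safe on data the application wrote itself):

import pickle

def load_user_model(user_id):
    doc = userData.find_one({'userId': user_id}, {'gmmPickleStore': 1})
    if doc is None or 'gmmPickleStore' not in doc:
        return None
    # Binary behaves like bytes, so pickle reads it directly
    return pickle.loads(doc['gmmPickleStore'])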
Example #19
    def to_mongo(self, value):
        return Binary(value.public_bytes(serialization.Encoding.DER))
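The inverse direction parses the DER bytes back into a key object; a sketch of a hypothetical from_mongo counterpart, assuming a cryptography release recent enough (>= 3.1) that load_der_public_key needs no backend argument:

from cryptography.hazmat.primitives.serialization import load_der_public_key

def from_mongo(value):
    # value is a bson.Binary holding a DER-encoded public key
    return load_der_public_key(bytes(value))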
Example #20
def load_custom(out_fn):
    import bz2
    import csv
    import pickle

    import gridfs
    import numpy as np
    from bson import Binary
    from datasketch import MinHash
    from numpy import genfromtxt
    from pymongo import MongoClient
    from sklearn.model_selection import train_test_split

    client = MongoClient()
    db = client['vtest102']
    fs = gridfs.GridFS(db)

    data = genfromtxt('minhash_big.csv', delimiter=',')

    X_train, X_test = train_test_split(data, test_size=0.1)

    with open('minhash_big_mapping.csv', "w") as output:
        counter = 0
        writer = csv.writer(output, lineterminator='\n')
        for file in fs.find({'type': 'test_row'}):
            fs.delete(file._id)
        for file in fs.find({'type': 'train_row'}):
            fs.delete(file._id)
        print('number of train rows before writing(should be 0): ' +
              str(fs.find({
                  'type': 'train_row'
              }).count()) + '\n')

        for row in X_test:
            writer.writerow(row)
            id = -int(row[0])
            group = fs.find_one({'type': 'group', 'group_id': str(id)})
            members = pickle.loads(bz2.decompress(group.read()))
            m = MinHash(num_perm=128)
            for i in members:
                m.update(str(i).encode('utf-8'))
            fs.put(Binary(bz2.compress(pickle.dumps(m))),
                   filename="test_row" + str(id),
                   group_id=str(id),
                   type='test_row',
                   version='vtest102')
            print(counter)
            counter += 1

        for row in X_train:
            id = -int(row[0])
            group = fs.find_one({'type': 'group', 'group_id': str(id)})
            members = pickle.loads(bz2.decompress(group.read()))
            m = MinHash(num_perm=128)
            for i in members:
                m.update(str(i).encode('utf-8'))
            fs.put(Binary(bz2.compress(pickle.dumps(m))),
                   filename="train_row" + str(id),
                   group_id=str(id),
                   type='train_row',
                   version='vtest102')
            print(counter)
            counter += 1

    X_train_cut = np.delete(X_train, 0, axis=1)
    X_test_cut = np.delete(X_test, 0, axis=1)
    write_output(X_train_cut, X_test_cut, out_fn, 'jaccard')
Example #21
 def to_son(self, value):
     return Binary(value)
Example #22
    def insert(content):
        b_content = Binary(content)

        mc = MongoClient()
        c = mc['test']['image']
        c.insert_one({"img_src": 'http://', "img": b_content})
Example #23
    u"address": [{
        u"street": text_type(address.street),
        u"city": text_type(address.city),
        u"state": text_type(address.state)
    } for address in user_el.address]
} for user_el in users]

email = Email(email="*****@*****.**")
email_dict_id = {"id": email.pk}
email_dict_email = {"id": email.pk}

article = Article(pk=ObjectId(),
                  user=user,
                  title="Test Tile",
                  date=now - timedelta(microseconds=now.microsecond % 1000),
                  body=Binary(b"\x00\x01\x02\x03\x04"),
                  uuid=uuid5(NAMESPACE_DNS, "This is a test"),
                  addition=ArticleMetaData(seller=Seller(
                      name="Musle Woman",
                      address=Address(street="Test musle street",
                                      city="Test musle city",
                                      state="Test musle state")),
                                           price=1000000))
article_dict = {
    u"id": text_type(article.pk),
    u"user": text_type(user_dict["id"]),
    u"title": text_type(article.title),
    u"date": text_type(article.date.isoformat()),
    u"body": {
        u"data": text_type(b64encode(article.body).decode("utf-8")),
        u"type": article.body.subtype
Example #24
 def decode_mismatch_id(cls, _json):
     doc = json.loads(_json)
     return Binary(base64.b64decode(doc['_id']), 0)
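The symmetric encoder would base64 the raw bytes back into the JSON shape decoded above; a hypothetical sketch:

 def encode_mismatch_id(cls, binary_id):
     # Binary subclasses bytes, so b64encode accepts it directly
     return json.dumps({'_id': base64.b64encode(binary_id).decode('ascii')})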
Example #25
def extract_frames(video_file, video_timestamp_str):
    client = MongoClient()

    # specify the collection for storing frames
    db = client[DATABASE_NAME]
    collection = db[COLLECTION_NAME]
    collection.delete_many({})
    collection.create_index([('created_at', 1)])
    
    vidcap = cv2.VideoCapture(video_file)
    video_full_filename, video_file_ext = video_file.split(".")    
    video_filename = os.path.basename(video_full_filename)
    video_timestamp = datetime.datetime.strptime(video_timestamp_str, "%Y-%m-%d %H:%M:%S")
    #print("video_timestamp: %s" % video_timestamp)
   
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    ms_per_frame = int(1000000 / fps)
    print('\n')
    print("################################################################################")
    print("#############     VIDEO INFORMATION      #######################################")
    print("################################################################################")
    print("# {:25}| {:<50}#" .format ("Input video:", video_file))
    print("# {:25}| {:<50}#" .format ("Frame per second:", fps))
    print("# {:25}| {:<50}#" .format ("Microsecond per frame:", ms_per_frame))
    num_of_frames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
    print("# {:25}| {:<50}#" .format ("Total number of frames:", int(num_of_frames)))
    print("# {:25}| {:<50}#" .format ("Video starts from:", video_timestamp_str))
    print("################################################################################")
    print('\n')
    print("Extracting frames from video ......")
    bar = progressbar.ProgressBar(maxval=num_of_frames, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    cnt = 0
    show_cnt = 0
    image_samples = {}
    success,image = vidcap.read()
    posts = []
    start_datetime = datetime.datetime.now()
    while success:
        frame_name = video_filename + "_" + str(cnt).zfill(5) + ".png"
        
        # save the snapshots to display
        if cnt % SNAPSHOT_INTERVAL == 0 and show_cnt < NUMBER_OF_SNAPSHOTS:  
            image_samples[frame_name]=image
            show_cnt = show_cnt + 1

        # insert the image into mongodb
        frame_datetime = video_timestamp + datetime.timedelta(microseconds=ms_per_frame*cnt)
        image_data = Binary(cPickle.dumps(image, protocol=2))

        # ingest the frame to mongodb
        post = { 'sensor_id' : 1, 'video_name' : video_filename + '.' + video_file_ext, 'created_at' : frame_datetime , 'image' : image_data}
        posts.append(post)
        if len(posts) % 5 == 0:
            collection.insert_many(posts)
            posts = []
    
        # read the next frame
        success,image = vidcap.read()
        cnt = cnt + 1 
        bar.update(cnt)    
        
    if len(posts) != 0:
        collection.insert_many(posts)
        posts = []  
    bar.finish()
    
    end_datetime = datetime.datetime.now()    
    time_taken = (end_datetime - start_datetime).total_seconds()
    print("\n=> %s image frames uploaded in %.2f seconds" % (int(num_of_frames),time_taken))
    print("=> %.2f image frames uploaded per seconds" % (num_of_frames/time_taken))
    
    print("\nDisplaying snapshots ...")
    # display the saved snapshots
    show_cnt = 0    
    window_positions = {}
    for frame_name in sorted(image_samples):
        x_pos = 50 + (show_cnt % NUM_SNAPSHOTS_IN_A_ROW) * X_MOVEMENT
        y_pos = 50 + (show_cnt // NUM_SNAPSHOTS_IN_A_ROW) * Y_MOVEMENT
        #print('position: (%s,%s)' % (x_pos, y_pos))
        move_window(frame_name, image_samples[frame_name], 10, 10, x_pos, y_pos, NUM_MOVE_INTERVALS)
        window_positions[frame_name] = (x_pos, y_pos)
        show_cnt = show_cnt + 1
        time.sleep(0.5)
    
    cv2.waitKey(0)
    print("Done\n")
    
    for frame_name in reversed(sorted(image_samples)):
        x_pos, y_pos = window_positions[frame_name]
        move_window(frame_name, image_samples[frame_name], x_pos, y_pos, 10,10, NUM_MOVE_INTERVALS, False)

    cv2.destroyAllWindows()
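Frames come back by unpickling the image field; a sketch assuming the same collection handle and the Python 2 environment used above (load_frame is a hypothetical name):

def load_frame(created_at):
    # the image field is a protocol-2 pickle of the numpy frame
    doc = collection.find_one({'created_at': created_at})
    return cPickle.loads(doc['image']) if doc else None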
Example #26
 def to_mongo(self, value):
     return Binary(value)
Example #27
def InsertCommodityUpdate(data):
    # insert_one replaces the deprecated Collection.insert; returning
    # inserted_id preserves the old return value
    return col_updates.insert_one({
        'timestamp': data.timestamp,
        'payload': Binary(data.SerializeToString())
    }).inserted_id
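Reading an update back means parsing the payload into the original protobuf type; a sketch where CommodityUpdate is a stand-in for the real message class:

def LoadCommodityUpdate(doc):
    update = CommodityUpdate()  # hypothetical protobuf message class
    update.ParseFromString(bytes(doc['payload']))
    return update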
Example #28
 def get_model(dataset: dict):
     dataset = Dataset(dataset['name'], pd.read_json(dataset['columns']),
                       Binary(b''), dataset['_id'])
     return dataset
Example #30
File: app.py Project: Conceptx/OJA
def apply():
    if request.method == 'POST':

        name = '''{} {}'''.format(request.form.get('firstname'),
                                  request.form.get('surname'))
        contacts = '''{} , {} , {}  '''.format(request.form.get('phone1'),
                                               request.form.get('phone2'),
                                               request.form.get('address'))
        email = request.form.get('email')
        sex = request.form.get('sex')

        #dynamic entry of age (approximate, using 365-day years)
        dob = [int(i) for i in request.form.get('DOB').split("-")]
        age = int((date.today() - date(dob[0], dob[1], dob[2])).days / 365)

        #qualifications

        qualifications = ""
        institution = ""
        workexperience = ""
        file = request.files.get('cv')
        cv = Binary(file.read()) if file else None

        comments = "CV & Certificates attached" if cv else ""

        #applyFor = post
        status = "new"

        for i in range(1, int(request.form.get('numberOfQualifications')) + 1):
            qualifications += "{}. ".format(str(i)) + request.form.get(
                'qualification{}'.format(i)) + ". "
            institution += "{}. ".format(str(i)) + request.form.get(
                'awardingInstitute{}'.format(i)) + ". "
        for i in range(1,
                       int(request.form.get('numberOfWorkExperiences')) + 1):
            workexperience += "{}. Worked at {} as {} since {}. ".format(
                i, request.form.get('organisation{}'.format(i)),
                request.form.get('position{}'.format(i)),
                request.form.get('timeframe{}'.format(i)))

        # look up and insert once, after the loop has collected all experiences
        user = db.applicants.find_one({
            'National_id': request.form.get('nationalid'),
            'post': postSession
        })

        if user is None:
            db.applicants.insert_one({
                'name': name,
                'contact details': contacts,
                'sex': sex,
                'age': age,
                'National_id': request.form.get('nationalid'),
                'academic qualifications': qualifications,
                'awarding institute': institution,
                'work experience': workexperience,
                'curriculum vitae': cv,
                'comments': comments,
                'status': status,
                'post': postSession,
                'email': email
            })
            flash('Application For Vacancy Was Successful')
        else:
            flash('Application For Vacancy Already Exists')
            return redirect(url_for('home'))
    return render_template('applicationform.html')
Example #31
    Color: str,
    timedelta: lambda td: td.total_seconds(),
    Decimal: Decimal128,
    deque: list,
    IPv4Address: str,
    IPv4Interface: str,
    IPv4Network: str,
    IPv6Address: str,
    IPv6Interface: str,
    IPv6Network: str,
    SecretBytes: SecretBytes.get_secret_value,
    SecretStr: SecretStr.get_secret_value,
    Enum: lambda o: o.value,
    PurePath: str,
    Link: lambda l: l.ref,
    bytes: lambda b: b if isinstance(b, Binary) else Binary(b),
    UUID: lambda u: bson.Binary.from_uuid(u),
}


class Encoder:
    """
    BSON encoding class
    """
    def __init__(
        self,
        exclude: Union[AbstractSet[Union[str, int]],
                       Mapping[Union[str, int], Any], None] = None,
        custom_encoders: Dict[Type, Callable] = None,
        by_alias: bool = True,
        to_db: bool = False,
Example #32
def save(y):
    # serialize array y
    y = Binary(pickle.dumps(y, protocol=2))
    return dqndb.insert({'content': y})
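The matching load unpickles the stored array; a minimal sketch against the same dqndb collection (load and the _id lookup are assumptions):

def load(doc_id):
    doc = dqndb.find_one({'_id': doc_id})
    # Binary behaves like bytes, so pickle.loads reads it directly
    return pickle.loads(doc['content'])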