Example #1
def main(args):
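    # Pick the device, load the config and its val_dataset, then run predict() over every image found at args.image_path.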
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    test_config = get_test_config(cfg, args)
    config_check(cfg, val_dataset=val_dataset)

    predict(model,
            model_path=args.model_path,
            val_dataset=val_dataset,
            image_list=image_list,
            image_dir=image_dir,
            save_dir=args.save_dir,
            **test_config)
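
For context, the sketch below shows one way this entry point might be invoked from the command line. Only a few of the arguments referenced above (cfg, image_path, model_path, save_dir) are wired up; the real script defines its own parser (and get_test_config may read further options), so this argparse setup is an assumption rather than the original code.

if __name__ == '__main__':
    import argparse

    # Hypothetical parser; the original script's argument definitions are not part of the snippet above.
    parser = argparse.ArgumentParser(description='Model prediction')
    parser.add_argument('--cfg', help='Path of the configuration file')
    parser.add_argument('--image_path', help='Image file or directory of images to predict')
    parser.add_argument('--model_path', help='Path of the trained model weights')
    parser.add_argument('--save_dir', default='./output', help='Directory in which to save results')
    main(parser.parse_args())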
Example #2
def on_message(slack, config, message):
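    # Dispatch an incoming Slack message to the matching foosbot command (help, rank, stats, results, predict, delete).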
    # Respond to all types of messages.
    channel_id = message['channel']
    if channel_id[0] in ['C', 'G']:
        # Channel/group message, make sure foosbot is being addressed.
        if not re.search("<@{}>".format(config['bot_id']), message['text']):
            return
    sender = message['user']
    text = message['text'].lower()

    context = core.Context(slack=slack,
                           channel=channel_id,
                           sender=sender,
                           bot_id=config['bot_id'],
                           users=config['users'],
                           matches=loldb.getmatches())

    # Look for HELP
    matches_help = re.search(help_command, text)
    if matches_help:
        core.reply_with_message(help_message, context)
        return

    # Look for RANK
    matches_rank = re.search(rank_command, text)
    if matches_rank:
        core.rank(context)
        return

    # Look for STATS
    matches_stats = re.search(stats_command, text)
    if matches_stats:
        core.stats(users=users_in(matches_stats.group('who')), context=context)
        return

    # Look for RESULTS
    matches_results = re.search(results_command, text)
    if matches_results:
        core.results(users=users_in_teams(matches_results.group('who')),
                     score1=matches_results.group('score1'),
                     score2=matches_results.group('score2'),
                     context=context)
        return

    # Look for PREDICT
    matches_predict = re.search(predict_command, text)
    if matches_predict:
        core.predict(users=users_in_teams(matches_predict.group('who')), context=context)
        return

    # Look for DELETE
    matches_delete = re.search(delete_command, text)
    if matches_delete:
        core.delete(game_id=matches_delete.group('what'), context=context)
        return

    core.reply_with_message(didnt_understand_message, context)
Example #3
    def __predict(self):
        self.pred = predict(self.model,
                            self.img,
                            self.clicks,
                            if_sis=self.if_sis,
                            if_cuda=self.if_cuda)
        self.merge = self.__gene_merge(self.pred, self.img, self.clicks)
        self.__update()
Example #4
def main(args):
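    # Same flow as the plain segmentation entry point, but also collects an optional trimap list (used for matting-style prediction) before calling predict().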
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms

    image_list, image_dir = get_image_list(args.image_path)
    if args.trimap_path is None:
        trimap_list = None
    else:
        trimap_list, _ = get_image_list(args.trimap_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    predict(model,
            model_path=args.model_path,
            transforms=transforms,
            image_list=image_list,
            image_dir=image_dir,
            trimap_list=trimap_list,
            save_dir=args.save_dir)
Example #5
def main(args):
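    # Panoptic-style variant: forwards dataset attributes (thing_list, label_divisor, stuff_area, ignore_index) and post-processing options (threshold, nms_kernel, top_k) to predict().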
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    config_check(cfg, val_dataset=val_dataset)

    predict(model,
            model_path=args.model_path,
            transforms=transforms,
            thing_list=val_dataset.thing_list,
            label_divisor=val_dataset.label_divisor,
            stuff_area=val_dataset.stuff_area,
            ignore_index=val_dataset.ignore_index,
            image_list=image_list,
            image_dir=image_dir,
            save_dir=args.save_dir,
            threshold=args.threshold,
            nms_kernel=args.nms_kernel,
            top_k=args.top_k)
Example #6
def execute(tensor, density, para):
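    # For each round: hide entries of the tensor at the given density, predict from the training tensor, merge outliers into the test set, then record error metrics and running time.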
    startTime = time.clock()
    [numUser, numService, numTime] = tensor.shape
    rounds = para['rounds']
    logger.info('Data size: %d users * %d services * %d timeslices'\
     %(numUser, numService, numTime))
    logger.info('Run the algorithm for %d rounds: density = %.2f.' %
                (rounds, density))
    evalResults = np.zeros((rounds, len(para['metrics'])))
    timeResults = np.zeros((rounds, 1))

    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data to generate trainTensor and testTensor
        (trainTensor, testTensor, indtrainTensor,
         indtestTensor) = removeTensor(tensor, density, k, para)
        logger.info('Removing data entries done.')
        # remove outliers from the testing data
        (testTensor,
         indtestTensor) = merge_test_outlier(testTensor, indtestTensor,
                                             para['outlier_fra'], para)
        logger.info('Merge outlier done.')

        # invocation to the prediction function
        iterStartTime = time.clock(
        )  # to record the running time for one round
        predictedTensor = core.predict(trainTensor, para)
        timeResults[k] = time.clock() - iterStartTime

        # calculate the prediction error
        evalResults[k, :] = cal_metric(testTensor, predictedTensor,
                                       indtestTensor)
        print(evalResults)

        logger.info('%d-round done. Running time: %.2f sec' %
                    (k + 1, timeResults[k]))
        logger.info('----------------------------------------------')

    outFile = '%savg_%sResult_density%.2f.txt' % (para['outPath'],
                                                  para['dataType'], density)
    saveResult(outFile, evalResults, timeResults, para)

    logger.info('Density = %.2f done. Running time: %.2f sec' %
                (density, time.clock() - startTime))
    logger.info('==============================================')
Example #7
def execute(tensor, density, para):
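    # Online evaluation variant: for 'rt' data only the trailing time slices (controlled by para['slicesToTest']) are scored; errors are averaged over the scored slices.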
    startTime = time.clock()
    [numUser, numService, numTime] = tensor.shape
    rounds = para['rounds']
    logger.info('Data size: %d users * %d services * %d timeslices'\
    	%(numUser, numService, numTime))
    logger.info('Run the algorithm for %d rounds: density = %.2f.'%(rounds, density))
    evalResults = np.zeros((rounds, len(para['metrics']))) 
    timeResults = np.zeros((rounds, 2))
    
    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data to generate trainTensor and testTensor
        (trainTensor, testTensor) = removeTensor(tensor, density, k, para)
        logger.info('Removing data entries done.')
        if para['dataType'] == 'rt':
            startSlice = int(numTime * (1 - para['slicesToTest']))
        else:  # for case 'rel'
            startSlice = 0
        onlineTestTensor = testTensor[:, :, startSlice:]

        # invocation to the prediction function
        iterStartTime = time.clock()  # to record the running time for one round
        (onlinePredTensor, timeResults[k, :]) = core.predict(trainTensor, para)

        # calculate the prediction error
        result = np.zeros((onlineTestTensor.shape[2], len(para['metrics'])))
        for i in range(onlineTestTensor.shape[2]):
            testMatrix = onlineTestTensor[:, :, i]
            predictedMatrix = onlinePredTensor[:, :, i]
            (testVecX, testVecY) = np.where(testMatrix)
            testVec = testMatrix[testVecX, testVecY]
            predVec = predictedMatrix[testVecX, testVecY]
            result[i, :] = errMetric(testVec, predVec, para['metrics'])
        evalResults[k, :] = np.average(result, axis=0)

        logger.info('%d-round done. Running time: %.2f sec' % (k + 1, time.clock() - iterStartTime))
        logger.info('----------------------------------------------')

    outFile = '%savg_%sResult_%.2f.txt'%(para['outPath'], para['dataType'], density)
    saveResult(outFile, evalResults, timeResults, para)

    logger.info('Density = %.2f done. Running time: %.2f sec'
			%(density, time.clock() - startTime))
    logger.info('==============================================')
Example #8
def chatbot(txt):
    ##ECHO
   ##response = messaging_text
   #count = 0
   #my_ques_series = pd.Series(['What is your age?','What is your gender? 1 : Male 0 : Transgender -1 : Female','Do you have a family history of mental illness? 1 : yes 0 : no','If you have a mental health condition, do you feel that it interferes with your work? 0 : never 1 : rarely 2 : sometimes 3 : often','How many employees does your company or organization have? 1 : 1-5 2 : 6-25 3 : 26-100 4 : 100-500 5 : 500-1000 6: More than 1000','Do you work remotely (outside of an office) at least 50% of the time? 1 : yes 0 : no','Is your employer primarily a tech company/organization? 1 : yes 0 : no','Does your employer provide mental health benefits? 1 : yes 0 : don\'t know -1 : no','Do you know the options for mental health care your employer provides? 1 : yes 0 : not sure -1 : no','Has your employer ever discussed mental health as part of an employee wellness program? 1 : yes 0 : don\'t know -1 : no','Does your employer provide resources to learn more about mental health issues and how to seek help? 1 : yes 0 : don\'t know -1 : no','Is your anonymity protected if you choose to take advantage of mental health or substance abuse treatment resources? 1 : yes 0 : don\'t know -1 : no','How easy is it for you to take medical leave for a mental health condition? 0 : very easy 1 : somewhat easy 2 : don\'t know 3 : somewhat difficult 4 : very difficult','Do you think that discussing a mental health issue with your employer would have negative consequences? 1 : yes 0 : maybe -1 : no','Do you think that discussing a physical health issue with your employer would have negative consequences? 1 : yes 0 : maybe -1 : no','Would you be willing to discuss a mental health issue with your coworkers? 1 : yes 0 : some of them -1 : no','Would you be willing to discuss a mental health issue with your direct supervisor(s)? 1 : yes 0 : some of them -1 : no','Would you bring up a mental health issue with a potential employer in an interview?1 : yes 0 : maybe -1 : no','Would you bring up a physical health issue with a potential employer in an interview? 1 : yes 0 : maybe -1 : no','Do you feel that your employer takes mental health as seriously as physical health? 1 : yes 0 : don\'t know -1 : no','Have you heard of or observed negative consequences for coworkers with mental health conditions in your workplace? 1 : yes 0 : no','Thanks! Calculating...'])
   #allval = []
   
   
    global allval, od, x
    response = None
    entity, value = wit_response(txt)
    print(entity, value)
    allval[x] = value
    x = x + 1
    tup = ()

    #if len(allval) < 24:
    if entity == 'greetings':
        response = "Hi, Welcome to Knowmad! We will do a small survey to predict how work related stress could be affecting your mental health. Shall we begin?"
        #global count
        #print(count, allval[count])
    elif entity == 'yes_no': #or entity == 'number':
        if value == 'yes': #or value != '':
            #global my_ques_series
            print(0, "First in Question list")
            response = od[0]
        else:
            response = "Okay maybe next time."

    elif entity == 'number' and len(allval) < 24:
        #value > -1 and value < 100:
        global count
        print(count, " in Question list")
        response = od[count]
        count = count + 1
        print(allval)
    elif len(allval) == 24:
        print("reached the end!")
        allval.pop(0)
        allval.pop(1)
        for key, value in allval.items():
            tup = tup + (value,)
        outcome = predict(tup)
        response = "The outcome is {}".format(str(outcome))

    return response
Example #9
    def inference():
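        # Answer CORS preflight on OPTIONS; on POST, validate the 'instance' query arg and the JSON 'text' field, then return core.predict()'s output as JSON.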
        if request.method == 'OPTIONS':
            r = make_response("")
            for key, value in default_headers.items():
                r.headers.add(key, value)
            return r, 200
        if request.method == 'POST':
            response_dict = {}

            REST_instance = request.args.get('instance', default=None)
            if REST_instance is None or REST_instance == "":
                return make_response(
                    jsonify(
                        {'error': 'Missing (or empty) arg called "instance"'}),
                    400)

            if REST_instance not in instances:
                return make_response(
                    jsonify({
                        'error':
                        'Provided "instance" is not a recognized one.'
                    }), 400)

            # force-parse the payload as JSON even if the Content-Type header is not application/json
            request_content = request.get_json(force=True)

            if "text" not in request_content:
                return make_response(
                    jsonify({'error': 'Missing "text" field in POST body.'}),
                    400)
            text = request_content["text"]
            if text is None or text == "":
                return make_response(
                    jsonify({
                        'error':
                        'Data field is empty: no "text" to process.'
                    }), 400)

            instance = instances[REST_instance]
            response_dict["prediction"] = core.predict(instance, text)

            response = jsonify(response_dict)
            for key, value in default_headers.items():
                response.headers.add(key, value)
            return response, 200
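
A client call against this handler might look like the following sketch. The route path, host, and port are assumptions (only the view body is shown above); 'EN300Twitter' is one of the instance names that appears in Example #17.

import requests

# Hypothetical endpoint URL; adjust to wherever the Flask route is actually mounted.
resp = requests.post(
    'http://localhost:5000/inference',
    params={'instance': 'EN300Twitter'},
    json={'text': 'some text to classify'},
)
print(resp.json().get('prediction'))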
Example #10
def execute(tensor, density, para):
    startTime = time.clock()
    [numUser, numService, numTime] = tensor.shape
    rounds = para['rounds']
    logger.info('Data size: %d users * %d services * %d timeslices'\
    	%(numUser, numService, numTime))
    logger.info('Run the algorithm for %d rounds: density = %.2f.'%(rounds, density))
    evalResults = np.zeros((rounds, len(para['metrics']))) 
    timeResults = np.zeros((rounds, 1))
    
    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data to generate trainTensor and testTensor
        (trainTensor, testTensor) = removeTensor(tensor, density, k, para)
        logger.info('Removing data entries done.')

        # invocation to the prediction function
        iterStartTime = time.clock()  # to record the running time for one round
        predictedTensor = core.predict(trainTensor, para)
        timeResults[k] = time.clock() - iterStartTime

        # calculate the prediction error
        result = np.zeros((numTime, len(para['metrics'])))
        for i in range(numTime):
            testMatrix = testTensor[:, :, i]
            predictedMatrix = predictedTensor[:, :, i]
            (testVecX, testVecY) = np.where(testMatrix)
            testVec = testMatrix[testVecX, testVecY]
            predVec = predictedMatrix[testVecX, testVecY]
            result[i, :] = errMetric(testVec, predVec, para['metrics'])
        evalResults[k, :] = np.average(result, axis=0)

        logger.info('%d-round done. Running time: %.2f sec' % (k + 1, timeResults[k]))
        logger.info('----------------------------------------------')

    outFile = '%savg_%sResult_%.2f.txt'%(para['outPath'], para['dataType'], density)
    saveResult(outFile, evalResults, timeResults, para)

    logger.info('Density = %.2f done. Running time: %.2f sec'
			%(density, time.clock() - startTime))
    logger.info('==============================================')
Example #11
def execute(matrix, locGroup, density, para):
    startTime = time.clock()
    numService = matrix.shape[1]
    numUser = matrix.shape[0]
    rounds = para['rounds']
    logger.info('Data matrix size: %d users * %d services' %
                (numUser, numService))
    logger.info('Run the algorithm for %d rounds: matrix density = %.2f.' %
                (rounds, density))
    evalResults = np.zeros((rounds, len(para['metrics'])))
    timeResults = np.zeros((rounds, 1))

    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data matrix to generate trainMatrix and testMatrix
        # use k as random seed
        (trainMatrix, testMatrix) = removeEntries(matrix, density, k)
        logger.info('Removing data entries done.')
        (testVecX, testVecY) = np.where(testMatrix)
        testVec = testMatrix[testVecX, testVecY]
        # read the training data, i.e., removed matrix

        # invocation to the prediction function
        iterStartTime = time.clock(
        )  # to record the running time for one round
        predictedMatrix = core.predict(trainMatrix, locGroup, para)
        timeResults[k] = time.clock() - iterStartTime

        # calculate the prediction error
        predVec = predictedMatrix[testVecX, testVecY]
        evalResults[k, :] = errMetric(testVec, predVec, para['metrics'])

        logger.info('%d-round done. Running time: %.2f sec' %
                    (k + 1, timeResults[k]))
        logger.info('----------------------------------------------')

    outFile = '%s%sResult_%.2f.txt' % (para['outPath'], para['dataType'],
                                       density)
    saveResult(outFile, evalResults, timeResults, para)
    logger.info('Config density = %.2f done. Running time: %.2f sec' %
                (density, time.clock() - startTime))
    logger.info('==============================================')
Example #12
def execute(matrix, density, para):
    startTime = time.clock()
    numService = matrix.shape[1] 
    numUser = matrix.shape[0] 
    rounds = para['rounds']
    logger.info('Data matrix size: %d users * %d services'%(numUser, numService))
    logger.info('Run the algorithm for %d rounds: matrix density = %.2f.'%(rounds, density))
    
    numMetric = 0
    for metric in para['metrics']:
        if isinstance(metric, tuple):
            numMetric += len(metric[1])
        else:
            numMetric += 1
    evalResults = np.zeros((rounds, numMetric)) 
    timeResults = np.zeros((rounds, 1))
    	
    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data matrix to generate trainMatrix and testMatrix
        # use k as random seed
        (trainMatrix, testMatrix) = removeEntries(matrix, density, k)
        logger.info('Removing data entries done.')

        # invocation to the prediction function
        iterStartTime = time.clock()  # to record the running time for one round
        predictedMatrix = core.predict(trainMatrix, para)
        predictedMatrix[trainMatrix > 0] = trainMatrix[trainMatrix > 0]
        timeResults[k] = time.clock() - iterStartTime

        # calculate the prediction error
        evalResults[k, :] = errMetric(matrix, testMatrix, predictedMatrix, para['metrics'])

        logger.info('%d-round done. Running time: %.2f sec' % (k + 1, timeResults[k]))
        logger.info('----------------------------------------------')

    outFile = '%s%sResult_%.2f.txt'%(para['outPath'], para['dataType'], density)
    saveResult(outFile, evalResults, timeResults, para)
    logger.info('Config density = %.2f done. Running time: %.2f sec'
			%(density, time.clock() - startTime))
    logger.info('==============================================')
Example #13
def main(args):
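    # Matting entry point: predict an alpha matte for one image/trimap pair, then composite the image over args.bg_path using that matte.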
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms

    alpha = predict(model,
                    model_path=args.model_path,
                    transforms=transforms,
                    image_list=[args.image_path],
                    trimap_list=[args.trimap_path],
                    save_dir=args.save_dir)

    img_ori = cv2.imread(args.image_path)
    bg = get_bg(args.bg_path, img_ori.shape)
    alpha = alpha / 255
    alpha = alpha[:, :, np.newaxis]
    com = alpha * img_ori + (1 - alpha) * bg
    com = com.astype('uint8')
    com_save_path = os.path.join(args.save_dir,
                                 os.path.basename(args.image_path))
    cv2.imwrite(com_save_path, com)
Example #14
def prediction():
    ''' Makes a prediction: uploads the file, creates a spectrogram, applies the
    neural network and displays the result on the result page. '''
    file = request.files.get('file')

    if not file or file.filename == '':
        error = 'No selected file'
        return render_template(ERROR, error=error)

    if file and allowed_file(file.filename):

        image, fig = create_spectrogram(file)
        pred = predict(model, image)
        result = create_result(pred, classes)

        pngImage = io.BytesIO()
        FigureCanvas(fig).print_png(pngImage)
        spectrogram = "data:image/png;base64,"
        spectrogram += base64.b64encode(pngImage.getvalue()).decode('utf8')

        if result['probability'] > 74:
            bird_path = create_bird_path(result['bird'])
            probability = str(result['probability'])
            bird_type = result['bird']
            name, en_name, desc = get_bird_data(bird_type)

            return render_template(RESULT,
                                   image=spectrogram,
                                   bird=bird_path,
                                   probability=probability,
                                   bird_type=bird_type,
                                   name=name,
                                   en_name=en_name,
                                   desc=desc)

        else:
            return render_template(NOT_A_BIRD, image=spectrogram)

    else:
        error = 'Wrong file format'
        return render_template(ERROR, error=error)
Example #15
def upload():
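    # Classify an uploaded mail as fraud/ham/spam, update the blacklist for fraudulent senders, and render the result.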
    blacklist = open(get_full_path(blacklist_dir)).read()
    if request.method == 'POST':
        file = request.files['file']
        session.clear()
        if file and allowed_file(file.filename):
            filename = get_file_name(file.filename)
            if black_checker(filename):
                return render_template(
                    'base.html',
                    blacklist=open(blacklist_dir).read(),
                    message='This sender has been blacklisted!!')
            else:
                content = file.read()
                detect = predict(content)
                if detect == 'fraud':
                    update_blacklist(filename)
                    return render_template(
                        'base.html',
                        content=content,
                        blacklist=open(blacklist_dir).read(),
                        message='This is a fraudulent mail. '
                        'Therefore, the mail has been updated on the blacklist')
                elif detect == 'ham':
                    return render_template(
                        'base.html', blacklist=blacklist, content=content)
                elif detect == 'spam':
                    return render_template(
                        'base.html',
                        blacklist=blacklist,
                        message=
                        'This mail is spam, therefore it will be sent to spam folder'
                    )
                return redirect(
                    url_for('upload', blacklist=blacklist, content=content))
        return render_template('base.html',
                               blacklist=blacklist,
                               content=file.read())
    else:
        return render_template('base.html', blacklist=blacklist)
Example #16
def execute(matrix, initUserRegion, density, para):
    startTime = time.clock()
    numService = matrix.shape[1]
    numUser = matrix.shape[0]
    rounds = para['rounds']
    logger.info('Data matrix size: %d users * %d services' % (numUser, numService))
    logger.info('Run the algorithm for %d rounds: matrix density = %.2f.' % (rounds, density))
    evalResults = np.zeros((rounds, len(para['metrics'])))
    timeResults = np.zeros((rounds, 1))

    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data matrix to generate trainMatrix and testMatrix
        # use k as random seed
        (trainMatrix, testMatrix) = removeEntries(matrix, density, k)
        logger.info('Removing data entries done.')
        (testVecX, testVecY) = np.where(testMatrix)
        testVec = testMatrix[testVecX, testVecY]
        # read the training data, i.e., removed matrix

        # invocation to the prediction function
        iterStartTime = time.clock()  # to record the running time for one round
        predictedMatrix = core.predict(trainMatrix, initUserRegion, para)
        timeResults[k] = time.clock() - iterStartTime

        # calculate the prediction error
        predVec = predictedMatrix[testVecX, testVecY]
        evalResults[k, :] = errMetric(testVec, predVec, para['metrics'])

        logger.info('%d-round done. Running time: %.2f sec' % (k + 1, timeResults[k]))
        logger.info('----------------------------------------------')

    outFile = '%s%sResult_%.2f.txt' % (para['outPath'], para['dataType'], density)
    saveResult(outFile, evalResults, timeResults, para)
    logger.info('Config density = %.2f done. Running time: %.2f sec'
                % (density, time.clock() - startTime))
    logger.info('==============================================')
Example #17
def run_server():
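    # Load the configured model instances, then either run predict()/batch_predict() smoke tests when __DEBUG__ is set, or serve the Flask app through gevent's WSGIServer.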
    core.load_instances(REST_config, instances)

    if __DEBUG__:
        instance = instances["EN300Twitter"]
        print(
            core.predict(
                instance,
                "why does tom cruise take so many times to figure things out in the movie edge of tomorrow , but gets it right 1 st time in mission impossible ?"
            ))
        print(
            core.batch_predict(instance, [
                "why does tom cruise take so many times to figure things out in the movie edge of tomorrow , but gets it right 1 st time in mission impossible ?",
                "that sucks . i am forced to listen to kpop on thursday class . i guess my cells in brain are going to die out . "
            ]))
    else:  #if not __DEBUG__:
        app.wsgi_app = LoggingMiddleware(app.wsgi_app)
        #app.run(debug=True,
        #        host='0.0.0.0',
        #        port=configurations["port"],
        #        threaded=True,
        #        #threaded= (True if can_fork == False else False),processes =
        #        #(cpu_count() if can_fork else 1),
        #        use_reloader=False)

        ###TORNADO
        ##http_server = HTTPServer(WSGIContainer(app))
        ##http_server.listen(configurations["port"])
        ##IOLoop.current().start()

        #GEVENT
        print("Start gevent WSGI server")
        # use gevent WSGI server instead of the Flask
        http = WSGIServer(('', config["port"]), app.wsgi_app)
        # TODO gracefully handle shutdown
        http.serve_forever()
Example #18
def company_result():
	lid=session['loginid']
	q = "select * from tbl_criteria"
	res = select(q)
	input = []
	if "predict" in request.form:
		for i in range(len(res)):
			input.append(0)
		for row in res:
			name  = "criteria_" + str(row['criteria_id'])
			print(name)
			if name in request.form:
				level = float(request.form[name])
				criteria_id = row['criteria_id']
				input[criteria_id-1] = level
		result = core.predict(input,2)
		for row in result:
			q="insert into selection values(null,(select company_id from company where log_id='%s'),'%s',curdate(),'%s')" %(lid,row['student_id'],row['percent'])
			print(q)
			insert(q)
			print(result)
		return render_template('company_result.html',data = result)
	else:
		return render_template('company_predict.html')
Example #19
# -*- coding: utf-8 -*-
import os
import sys
from timeit import default_timer as timer  # assumed imports: os.listdir() and timer() are used below but not imported in the snippet
from core import predict

print('--- Classifieur Mer vs Ailleurs : TEAM LES PURISTES (mode image)')
print('les images deveront etre placees dans le dossier "images"')

if (len(sys.argv) == 2):
    img_name = sys.argv[1]
else:
    img_name = input('nom de l\'image (exemple: "ocean.jpeg"): ')

predict('./images/' + img_name, 1, 'Mer')
print('--- Classifieur Mer vs Ailleurs : TEAM LES PURISTES (mode dossier)')

if (len(sys.argv) == 2):
    folder_name = sys.argv[1]
else:
    folder_name = input('nom du dossier (exemple: "images"): ')

preds = list()

start = timer()

for img_name in os.listdir(folder_name):
    try:
        preds.append(
            [img_name,
             predict('./' + folder_name + '/' + img_name, 1, 'Mer')])
    except IOError:
        print('Erreur sur ouverture du fichier ')

end = timer()

print("--------- RAPPORT")

total_mer = 0

for elem in preds:
    is_mer = elem[1] >= 0.5

    if is_mer:
        total_mer += 1
Example #21
def execute_outlier(tensor, density, para):
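    # Like execute(), but after each round the same predicted tensor is scored against test sets built with every outlier fraction in para['outlier_fra'], and one result file is saved per fraction.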
    startTime = time.clock()
    [numUser, numService, numTime] = tensor.shape
    rounds = para['rounds']
    logger.info('Data size: %d users * %d services * %d timeslices'\
     %(numUser, numService, numTime))
    logger.info('Run the algorithm for %d rounds: density = %.2f.' %
                (rounds, density))
    evalResults = np.zeros(
        (len(para['outlier_fra']), rounds, len(para['metrics'])))
    timeResults = np.zeros((rounds, 1))

    for k in range(rounds):
        logger.info('----------------------------------------------')
        logger.info('%d-round starts.' % (k + 1))
        logger.info('----------------------------------------------')

        # remove the entries of data to generate trainTensor and testTensor
        (trainTensor, testTensor0, indtrainTensor,
         indtestTensor0) = removeTensor(tensor, density, k, para)
        logger.info('Removing data entries done.')

        # invocation to the prediction function
        iterStartTime = time.clock(
        )  # to record the running time for one round
        predictedTensor = core.predict(trainTensor, para)
        timeResults[k] = time.clock() - iterStartTime
        # calculate the prediction error

        o = 0
        for otf in para['outlier_fra']:
            # testTensor1 = merge_test_outlier(testTensor0, otf, para)
            # logger.info('Merge outlier done.')
            # result = np.zeros((numTime, len(para['metrics'])))

            # for i in range(numTime):
            # 	testMatrix = testTensor1[:, :, i]
            # 	predictedMatrix = predictedTensor[:, :, i]
            # 	(testVecX, testVecY) = np.where(testMatrix)
            # 	testVec = testMatrix[testVecX, testVecY]
            # 	predVec = predictedMatrix[testVecX, testVecY]
            # 	result[i, :] = errMetric(testVec, predVec, para['metrics'])
            # evalResults[o, k, :] = np.average(result, axis=0)
            (testTensor1,
             indtestTensor1) = merge_test_outlier(testTensor0, indtestTensor0,
                                                  otf, para)
            logger.info('Merge outlier done.')
            evalResults[o, k, :] = cal_metric(testTensor1, predictedTensor,
                                              indtestTensor1)
            o = o + 1

        logger.info('%d-round done. Running time: %.2f sec' %
                    (k + 1, timeResults[k]))
        logger.info('----------------------------------------------')

    o = 0
    for outlf in para['outlier_fra']:
        outFile = '%savg_%sResult_outlier_fra%.2f.txt' % (
            para['outPath'], para['dataType'], outlf)
        saveResult(outFile, evalResults[o, :, :], timeResults, para)
        o = o + 1

    logger.info('Density = %.2f done. Running time: %.2f sec' %
                (density, time.clock() - startTime))
    logger.info('==============================================')