Code example #1
  # Call the service's 'sire' operation; 'rt': 'json' requests the result as JSON
  result = relext.sire(params={
    'sid': 'ie-en-news',
    'txt': input_query,
    'rt': 'json'
  })

  # Content type of the returned document: JSON
  response.content_type = 'text/json'

  return result


# Setting up a Watson service
# - url and auth (username, password): found in the service's credentials
# - operations: found at http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/apis/
@post('/speech-to-text')
def speech_to_text():
  speech_to_text = WatsonService(
    url='https://stream.watsonplatform.net/speech-to-text/api',
    auth=('7112a49-50bf-4645-a877-2c57ef208bf5', '25FXpxclz2sA'),
    operations={
      'sire': {
        'method': 'POST',
        'path': '/v1/sire/0'
      }
    }
  )


WebServer.start()
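
The fragment at the top of this example calls relext.sire(...), so the relationship-extraction service is presumably configured through the same WatsonService wrapper. Below is a minimal sketch of that setup under that assumption; the URL and credentials are placeholders, not values taken from the source, and only the operation name and path mirror the call shown above.

relext = WatsonService(
  url='https://gateway.watsonplatform.net/relationship-extraction-beta/api',  # placeholder URL
  auth=('username', 'password'),  # placeholder credentials from the service's credentials entry
  operations={
    'sire': {
      'method': 'POST',
      'path': '/v1/sire/0'  # invoked above as relext.sire(params={...})
    }
  }
)
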
Code example #2
File: server.py Project: SherlockProject/eTeaching
            response = {
                'type': 'message',
                'text': randomText
            }

    else:
        response = {
            'type': 'error',
            'error': 'Unknown request type'
        }

    # generate sound file for non-error, non-greeting responses
    if (request['sound'] == 'on' and response.get('text')
            and response['text'] != hello):
        result = textToSpeech.synthesize(
            params={
                'voice': 'en-US_MichaelVoice',
                'text': response['text'],
                'accept': 'audio/wav'
            })

        # Save file (use a context manager so the handle is closed)
        with open('static/sound/answer.wav', 'wb') as audio_file:
            audio_file.write(result)

    return WebServer.processResponse(response)


WebServer.start()

# cf push eTeaching -p eTeaching -m 512M -n eteaching
Code example #3
File: server.py Project: SherlockProject/eTeaching
def process_func():
    request = JSON.loads(bottle.request.POST['request'])

    if (request['type'] == 'start'):
        WebServer.start_conversation()

        response = {
            'type': 'message',
            'text': hello
        }

    elif (request['type'] == 'message'):
        # import pprint;
        # pprint.pprint( user.info ); # user and conversation identifiers
        # pprint.pprint( user.data ); # empty dict that can be used throughout the whole conversation as storage
        # e.g. you can store all images that you have already used in a conversation

        # Example usage of user.data: counting answers
        if ('iteration' not in user.data):
            user.data['iteration'] = 0

        user.data['iteration'] = user.data['iteration'] + 1
        print('\nIteration: ' + str(user.data['iteration']))

        # If user typed "image" in the text box
        if (request['text'] == 'image'):

            #-------------------------- Download Random Image --------------------------------------------------------#
            r = requests.get('http://lorempixel.com/600/400/', stream=True)
            image_path = 'static/work_images/' + str(
                user.info['userID']) + '.jpg'

            if r.status_code == 200:
                with open(image_path, 'wb') as f:
                    r.raw.decode_content = True
                    shutil.copyfileobj(r.raw, f)
            #---------------------------------------------------------------------------------------------------------#

            response = {
                'type': 'image',
                'text': 'Take a look at this photo. Can you help me find Waldo?',
                'path': image_path,
                # unique identifier (image name/id in a database)
                'imid': 'image-identifier'
            }

        else:

            #-------------------------- Generate Random Text ---------------------------------------------------------#
            randomText = requests.get(
                'http://loripsum.net/api/plaintext/1/short/headers'
            ).text.split('\n')[0]
            #---------------------------------------------------------------------------------------------------------#

            response = {
                'type': 'message',
                'text': randomText
            }

    else:
        response = {
            'type': 'error',
            'error': 'Unknown request type'
        }

    # generate sound file for non-error, non-greeting responses
    if (request['sound'] == 'on' and response.get('text')
            and response['text'] != hello):
        result = textToSpeech.synthesize(
            params={
                'voice': 'en-US_MichaelVoice',
                'text': response['text'],
                'accept': 'audio/wav'
            })

        # Save file (use a context manager so the handle is closed)
        with open('static/sound/answer.wav', 'wb') as audio_file:
            audio_file.write(result)

    return WebServer.processResponse(response)
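
process_func reads a form field named request containing a JSON object with type, text, and sound keys, so a client exercising this handler only needs to post that one field. A minimal client-side sketch follows, assuming the handler is exposed at /process on a locally running server; the route path and port are placeholders, not taken from the source.

import json
import requests

# Hypothetical endpoint; adjust to wherever WebServer mounts process_func.
payload = {'type': 'message', 'text': 'image', 'sound': 'off'}
r = requests.post('http://localhost:8080/process',
                  data={'request': json.dumps(payload)})
print(r.text)
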