     from . import (
         assistant_helpers,
         audio_helpers,
+        browser_helpers,
         device_helpers
     )
 except (SystemError, ImportError):
     import assistant_helpers
     import audio_helpers
+    import browser_helpers
     import device_helpers
 
 
 ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
 END_OF_UTTERANCE = embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE
 DIALOG_FOLLOW_ON = embedded_assistant_pb2.DialogStateOut.DIALOG_FOLLOW_ON
 CLOSE_MICROPHONE = embedded_assistant_pb2.DialogStateOut.CLOSE_MICROPHONE
+PLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING
 DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
 
 
@@ -70,12 +73,13 @@ class SampleAssistant(object):
     """
 
     def __init__(self, language_code, device_model_id, device_id,
-                 conversation_stream,
+                 conversation_stream, display,
                  channel, deadline_sec, device_handler):
         self.language_code = language_code
         self.device_model_id = device_model_id
         self.device_id = device_id
         self.conversation_stream = conversation_stream
+        self.display = display
 
         # Opaque blob provided in AssistResponse that,
         # when provided in a follow-up AssistRequest,
@@ -142,6 +146,7 @@ def iter_log_assist_requests():
                                       for r in resp.speech_results))
             if len(resp.audio_out.audio_data) > 0:
                 if not self.conversation_stream.playing:
+                    self.conversation_stream.stop_recording()
                     self.conversation_stream.start_playback()
                     logging.info('Playing assistant response.')
                 self.conversation_stream.write(resp.audio_out.audio_data)
@@ -165,6 +170,9 @@ def iter_log_assist_requests():
                 fs = self.device_handler(device_request)
                 if fs:
                     device_actions_futures.extend(fs)
+            if self.display and resp.screen_out.data:
+                system_browser = browser_helpers.system_browser
+                system_browser.display(resp.screen_out.data)
 
         if len(device_actions_futures):
             logging.info('Waiting for device executions to complete.')
@@ -200,6 +208,9 @@ def gen_assist_requests(self):
                 device_model_id=self.device_model_id,
             )
         )
+        if self.display:
+            config.screen_out_config.screen_mode = PLAYING
+
         # The first AssistRequest must contain the AssistConfig
         # and no audio data.
         yield embedded_assistant_pb2.AssistRequest(config=config)
@@ -241,6 +252,9 @@ def gen_assist_requests(self):
               metavar='<language code>',
               default='en-US',
               help='Language code of the Assistant')
+@click.option('--display', is_flag=True, default=False,
+              help='Enable visual display of Assistant '
+                   'rich media responses (for certain queries).')
 @click.option('--verbose', '-v', is_flag=True, default=False,
               help='Verbose logging.')
 @click.option('--input-audio-file', '-i',
@@ -279,7 +293,8 @@ def gen_assist_requests(self):
 @click.option('--once', default=False, is_flag=True,
               help='Force termination after a single conversation.')
 def main(api_endpoint, credentials, project_id,
-         device_model_id, device_id, device_config, lang, verbose,
+         device_model_id, device_id, device_config,
+         lang, display, verbose,
          input_audio_file, output_audio_file,
          audio_sample_rate, audio_sample_width,
          audio_iter_size, audio_block_size, audio_flush_size,
@@ -424,7 +439,7 @@ def blink(speed, number):
             time.sleep(delay)
 
     with SampleAssistant(lang, device_model_id, device_id,
-                         conversation_stream,
+                         conversation_stream, display,
                          grpc_channel, grpc_deadline,
                          device_handler) as assistant:
         # If file arguments are supplied:
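
Note: the hunks above import a browser_helpers module and hand the HTML payload of resp.screen_out.data to browser_helpers.system_browser.display(), but that module is not part of this diff. The sketch below is only an assumed stand-in with the same interface, writing the payload to a temporary file and opening it in the default browser; the sample's actual helper may differ.

# Assumed sketch only: reimplements the interface the diff relies on
# (browser_helpers.system_browser.display(data)); not the commit's real helper.
import concurrent.futures
import tempfile
import webbrowser


class SystemBrowser(object):
    """Renders HTML screen-out payloads in the system's default browser."""

    def __init__(self):
        # A single worker thread keeps browser launches from blocking the
        # gRPC response loop that calls display().
        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)

    def display(self, html):
        """Schedule the given HTML bytes to be shown in the browser."""
        self._executor.submit(self._show, html)

    def _show(self, html):
        # Write the payload to a temporary file and open it as a file:// URL.
        f = tempfile.NamedTemporaryFile(suffix='.html', delete=False)
        f.write(html)
        f.close()
        webbrowser.open('file://' + f.name, new=0)


system_browser = SystemBrowser()

With such a helper in place, passing --display makes gen_assist_requests() set screen_out_config.screen_mode = PLAYING, so responses may carry screen_out HTML that the sample then renders in the browser.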