o
    Thh                     @   sz  U d Z ddlZddlZddlZddlZddlZddlZddlmZmZm	Z	m
Z
mZmZmZ ddlZddlZddlZddlmZ ddlmZ ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddlmZ ddlmZ ddlm Z  ddlm!Z" ddlm#Z$ ddl%m&Z& ddl'm(Z( ddl'm)Z) zddl*m+Z+ ddl*m,Z- W n e.y   ddl/m+Z+ ddl/m,Z- Y nw ej0rddl1m2Z3 ddl4m5Z6 ddl7m8Z8 ddlm9Z9 nBeZ3ej:e;d< eZ6ej:e;d< eZ8ej:e;d< zddl1m2Z3 ddl4m5Z6 ddl7m8Z8 ddlm9Z9 W n e<y   dZ3dZ6dZ8dZ9Y nw e=dZ>dZ?G dd  d Z@G d!d" d"ejAZBd#e d$e
ejC d%ejDfd&d'ZEdS )(z[Preview] Live API client.    N)AnyAsyncIteratorDictOptionalSequenceUnionget_args)ConnectionClosed   )_api_module)_common)_live_converters)
_mcp_utils)_transformers)client)errors)types)BaseApiClient)get_value_by_path)set_value_by_path)AsyncLiveMusic_Content_to_mldev_Content_to_vertex)ClientConnection)connect)ClientSession)Tool)McpToGenAiToolAdapter)mcp_to_gemini_toolMcpClientSessionMcpToolr   zgoogle_genai.liveznFunctionResponse request must have an `id` field from the response of a ToolCall.FunctionalCalls in Google AI.c                   @   s  e Zd ZdZdedefddZdddd	eee	j
e	je	je	je	je	jee	j f  d
ee ddfddZddddeee	je	jeee	je	jf  f  deddfddZdddddddddee	j dee	j dee dee	j dee dee	j dee	j ddfddZdee	jee	j f ddfdd Zdee	j fd!d"Z d#ee! d$edee	j fd%d&Z"de	jfd'd(Z#d)ee! d$ed*e$j%ddfd+d,Z&		d1d	eee	j
e	je	je	je	je	jee	j f  d
ee de	j'fd-d.Z(d2d/d0Z)dS )3AsyncSessionz[Preview] AsyncSession.
api_client	websocketc                 C   s   || _ || _d S N)_api_client_ws)selfr$   r%    r*   T/home/air/segue/gemini/backup/venv/lib/python3.10/site-packages/google/genai/live.py__init__V   s   
zAsyncSession.__init__NF)inputend_of_turnr-   r.   returnc                   s:   t jdtdd | ||}| jt|I dH  dS )a  [Deprecated] Send input to the model.

    > **Warning**: This method is deprecated and will be removed in a future
    version (not before Q3 2025). Please use one of the more specific methods:
    `send_client_content`, `send_realtime_input`, or `send_tool_response`
    instead.

    The method will send the input request to the server.

    Args:
      input: The input request to the model.
      end_of_turn: Whether the input is the last message in a turn.

    Example usage:

    .. code-block:: python

      client = genai.Client(api_key=API_KEY)

      async with client.aio.live.connect(model='...') as session:
        await session.send(input='Hello world!', end_of_turn=True)
        async for message in session.receive():
          print(message)
    zThe `session.send` method is deprecated and will be removed in a future version (not before Q3 2025).
Please use one of the more specific methods: `send_client_content`, `send_realtime_input`, or `send_tool_response` instead.   
stacklevelN)warningswarnDeprecationWarning_parse_client_messager(   sendjsondumps)r)   r-   r.   client_messager*   r*   r+   r7   Z   s   (zAsyncSession.sendTturnsturn_completer<   r=   c                   sP   t ||}| jjrtj|d}ntj|d}| jt	
d|iI dH  dS )a	  Send non-realtime, turn based content to the model.

    There are two ways to send messages to the live API:
    `send_client_content` and `send_realtime_input`.

    `send_client_content` messages are added to the model context **in order**.
    Having a conversation using `send_client_content` messages is roughly
    equivalent to using the `Chat.send_message_stream` method, except that the
    state of the `chat` history is stored on the API server.

    Because of `send_client_content`'s order guarantee, the model cannot
    respond as quickly to `send_client_content` messages as to
    `send_realtime_input` messages. This makes the biggest difference when
    sending objects that have significant preprocessing time (typically images).

    The `send_client_content` message sends a list of `Content` objects,
    which has more options than the `media:Blob` sent by `send_realtime_input`.

    The main use-cases for `send_client_content` over `send_realtime_input` are:

    - Prefilling a conversation context (including sending anything that can't
      be represented as a realtime message), before starting a realtime
      conversation.
    - Conducting a non-realtime conversation, similar to `client.chat`, using
      the live api.

    Caution: Interleaving `send_client_content` and `send_realtime_input`
      in the same conversation is not recommended and can lead to unexpected
      results.

    Args:
      turns: A `Content` object or list of `Content` objects (or equivalent
        dicts).
      turn_complete: if true (the default) the model will reply immediately. If
        false, the model will wait for you to send additional client_content,
        and will not return until you send `turn_complete=True`.

    Example:
    ```
    import google.genai
    from google.genai import types
    import os

    if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
      MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
    else:
      MODEL_NAME = 'gemini-2.0-flash-live-001';

    client = genai.Client()
    async with client.aio.live.connect(
        model=MODEL_NAME,
        config={"response_modalities": ["TEXT"]}
    ) as session:
      await session.send_client_content(
          turns=types.Content(
              role='user',
              parts=[types.Part(text="Hello world!")]))
      async for msg in session.receive():
        if msg.text:
          print(msg.text)
    ```
    from_objectclient_contentN)tt_client_contentr'   vertexailive_converters_LiveClientContent_to_vertex_LiveClientContent_to_mldevr(   r7   r8   r9   )r)   r<   r=   r@   client_content_dictr*   r*   r+   send_client_content   s   J z AsyncSession.send_client_content)mediaaudioaudio_stream_endvideotextactivity_startactivity_endrI   rJ   rK   rL   rM   rN   rO   c                   s  i }|dur||d< |dur||d< |dur||d< |dur#||d< |dur+||d< |dur3||d< |dur;||d< t |d	krQtd
t | dt|  tj|}	| jjrbt	j
|	d}
nt	j|	d}
t|
}
t|
}
| jtd|
iI dH  dS )au  Send realtime input to the model, only send one argument per call.

    Use `send_realtime_input` for realtime audio chunks and video
    frames(images).

    With `send_realtime_input` the api will respond to audio automatically
    based on voice activity detection (VAD).

    `send_realtime_input` is optimized for responsivness at the expense of
    deterministic ordering. Audio and video tokens are added to the
    context when they become available.

    Args:
      media: A `Blob`-like object, the realtime media to send.

    Example:
    ```
    from pathlib import Path

    from google import genai
    from google.genai import types

    import PIL.Image

    import os

    if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
      MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
    else:
      MODEL_NAME = 'gemini-2.0-flash-live-001';


    client = genai.Client()

    async with client.aio.live.connect(
        model=MODEL_NAME,
        config={"response_modalities": ["TEXT"]},
    ) as session:
      await session.send_realtime_input(
          media=PIL.Image.open('image.jpg'))

      audio_bytes = Path('audio.pcm').read_bytes()
      await session.send_realtime_input(
          media=types.Blob(data=audio_bytes, mime_type='audio/pcm;rate=16000'))

      async for msg in session.receive():
        if msg.text is not None:
          print(f'{msg.text}')
    ```
    NrI   rJ   rK   rL   rM   rN   rO   r
   z"Only one argument can be set, got z: r>   realtime_input)len
ValueErrorlistkeysr   LiveSendRealtimeInputParametersmodel_validater'   rC   rD   *_LiveSendRealtimeInputParameters_to_vertex)_LiveSendRealtimeInputParameters_to_mldevr   convert_to_dictencode_unserializable_typesr(   r7   r8   r9   )r)   rI   rJ   rK   rL   rM   rN   rO   kwargsrP   realtime_input_dictr*   r*   r+   send_realtime_input   sN   =

 z AsyncSession.send_realtime_inputfunction_responsesc                   sv   t |}| jjrtj|d}ntj|d}|dg D ]}|ddu r*tt	q| j
td|iI dH  dS )a  Send a tool response to the session.

    Use `send_tool_response` to reply to `LiveServerToolCall` messages
    from the server.

    To set the available tools, use the `config.tools` argument
    when you connect to the session (`client.live.connect`).

    Args:
      function_responses: A `FunctionResponse`-like object or list of
        `FunctionResponse`-like objects.

    Example:
    ```
    from google import genai
    from google.genai import types

    import os

    if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
      MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
    else:
      MODEL_NAME = 'gemini-2.0-flash-live-001';

    client = genai.Client()

    tools = [{'function_declarations': [{'name': 'turn_on_the_lights'}]}]
    config = {
        "tools": tools,
        "response_modalities": ['TEXT']
    }

    async with client.aio.live.connect(
        model='models/gemini-2.0-flash-live-001',
        config=config
    ) as session:
      prompt = "Turn on the lights please"
      await session.send_client_content(
          turns={"parts": [{'text': prompt}]}
      )

      async for chunk in session.receive():
          if chunk.server_content:
            if chunk.text is not None:
              print(chunk.text)
          elif chunk.tool_call:
            print(chunk.tool_call)
            print('_'*80)
            function_response=types.FunctionResponse(
                    name='turn_on_the_lights',
                    response={'result': 'ok'},
                    id=chunk.tool_call.function_calls[0].id,
                )
            print(function_response)
            await session.send_tool_response(
                function_responses=function_response
            )

            print('_'*80)
    r>   functionResponsesidNtool_response)rA   t_tool_responser'   rC   rD   !_LiveClientToolResponse_to_vertex _LiveClientToolResponse_to_mldevgetrR   _FUNCTION_RESPONSE_REQUIRES_IDr(   r7   r8   r9   )r)   r^   ra   tool_response_dictresponser*   r*   r+   send_tool_responseL  s   
D zAsyncSession.send_tool_responsec                 C  sL   |   I dH  }r$|jr|jjr|V  dS |V  |   I dH  }s
dS dS )a  Receive model responses from the server.

    The method will yield the model responses from the server. The returned
    responses will represent a complete model turn. When the returned message
    is function call, user must call `send` with the function response to
    continue the turn.

    Yields:
      The model responses from the server.

    Example usage:

    .. code-block:: python

      client = genai.Client(api_key=API_KEY)

      async with client.aio.live.connect(model='...') as session:
        await session.send(input='Hello world!', end_of_turn=True)
        async for message in session.receive():
          print(message)
    N)_receiveserver_contentr=   )r)   resultr*   r*   r+   receive  s   zAsyncSession.receivestream	mime_typec                C  s   t jdtdd t }t| ||| d}| sWz,t|  }tj	|t|	 gtj
dI dH  | rH| V  tdI dH  W n	 tyR   Y nw | r|durw| sy|  z|I dH  W dS  tjyv   Y dS w dS dS )a  [Deprecated] Start a live session from a data stream.

    > **Warning**: This method is deprecated and will be removed in a future
    version (not before Q2 2025). Please use one of the more specific methods:
    `send_client_content`, `send_realtime_input`, or `send_tool_response`
    instead.

    The interaction terminates when the input stream is complete.
    This method will start two async tasks. One task will be used to send the
    input stream to the model and the other task will be used to receive the
    responses from the model.

    Args:
      stream: An iterator that yields the model response.
      mime_type: The MIME type of the data in the stream.

    Yields:
      The audio bytes received from the model and server response messages.

    Example usage:

    .. code-block:: python

      client = genai.Client(api_key=API_KEY)
      config = {'response_modalities': ['AUDIO']}
      async def audio_stream():
        stream = read_audio()
        for data in stream:
          yield data
      async with client.aio.live.connect(model='...', config=config) as session:
        for audio in session.start_stream(stream = audio_stream(),
        mime_type = 'audio/pcm'):
          play_audio_chunk(audio.data)
    zSetting `AsyncSession.start_stream` is deprecated, and will be removed in a future release (not before Q3 2025). Please use the `receive`, and `send_realtime_input`, methods instead.   r1   N)return_when-q=)r3   r4   r5   asyncioEventcreate_task
_send_loopis_setrj   waitFIRST_COMPLETEDdonerl   sleepr	   cancelCancelledError)r)   rn   ro   
stop_event	recv_taskr*   r*   r+   start_stream  sD   %
zAsyncSession.start_streamc                    s   t  }z| jjddI d H }W n ty"   | j I d H }Y nw |r=zt|}W n tjjy<   t	d|w i }| j
jrIt|}nt|}t jj|| dS )NFdecodezFailed to parse response: )rh   r[   )r   LiveServerMessager(   recv	TypeErrorr8   loadsdecoderJSONDecodeErrorrR   r'   rC   rD   _LiveServerMessage_from_vertex_LiveServerMessage_from_mldev_from_response
model_dump)r)   parameter_modelraw_responserh   response_dictr*   r*   r+   rj     s(   
zAsyncSession._receivedata_streamr~   c                    s\   |2 z#3 d H W }t jt j||dgd}| j|dI d H  tdI d H  q6 |  d S )Ndataro   media_chunks)r-   rr   )r   LiveClientRealtimeInputBlobr7   rs   r{   set)r)   r   ro   r~   r   model_inputr*   r*   r+   rv     s   zAsyncSession._send_loopc              
      s  |}|st d dddiiS t|tr|g}ndt|trVd|v rVz
tjd(i |}W n tjy@   t	dt
| d| dw t|tjrUt|jtrU|jd	dd
g}n&t|tjr`|g}nt|tr|d|v r|d|v r|| jjsyd|vryt	t|g}t|trtdd |D rg }|D ]Y}t|trz
tjd(i |}W n tjy   t	dt
| d| dw |jd u r| jjst	t|jdd	d}tj|d|dd}	|dr|d|	d< ||	 qtjtj|dd}
|
S t|trtdd |D ri  g }|D ]}t|ttjr|| q
| jjr. fddt|D }n fddt|D }g }|D ]6}z
tjd(i |}W n tjy`   t	dt
| d| dw |tj|jdd	dd |j d q>tjtj!||dd}
|
S t|trtdd |D rntdd |D rdd |D }nt	dt
| d| dtjtj"|dd }
|
S t|trkd!|v sd"|v rd"|v r|d" }n|d! }tjtj!||ddd}
|
S d#|v r%z
tj#d(i |}W n tjy   t	dt
| d| dw tjtj"|jdd	dd# dd }
|
S d$|v r^z
tj$d(i |}W n tjyJ   t	dt
| d| dw tjtj|jdd	dd$ dd}
|
S t	dt
| d| dt|tj#r|jdd	d}tjtj"|d#dd }
|
d% d ur |
d% d# d ur t|
d% d# d& d tr g }|
d% d# D ]M}t|trz
tjd(i |}W n tjy   t	dt
| d| dw t|tjrt|jtr|jd ur|tj%t&'|j|j(d' q||
d% d#< |
S t|tj)r#|jdd	d}tjtj!|d"|ddd}
|
S t|tj$rT| jjs@|j*d ur@|j*d& js@t	ttjtj|jdd	dd$dd}
|
S t|tjr| jjsh|jsht	t|jdd	d}tj|d|dd}	|dr|d|	d< tjtj|	gdd}
|
S t|trt|d& tjr| jjs|d& jst	tg }|D ])}|jdd	d}tj|d|dd}	|dr|d|	d< ||	 qtjtj|dd}
|
S t	dt
| d| d))Nz0No input provided. Assume it is the end of turn.r@   r=   Tr   zUnsupported input type "z" or input content ""r8   )modeexclude_nonenamerh   r`   c                 s   s*    | ]}t |tod |v od|v V  qdS )r   rh   N
isinstancedict.0cr*   r*   r+   	<genexpr>W  s
    
z5AsyncSession._parse_client_message.<locals>.<genexpr>r   r   )r   rh   )r^   )ra   c                 s   s    | ]}t |tV  qd S r&   )r   strr   r*   r*   r+   r   }  s    

c                       g | ]}t | qS r*   r   r   item	to_objectr*   r+   
<listcomp>      z6AsyncSession._parse_client_message.<locals>.<listcomp>c                    r   r*   r   r   r   r*   r+   r     r   parts)r   roler;   )r@   c                 s   s"    | ]}t |tod |v V  qdS )r   Nr   r   br*   r*   r+   r     s     c                 s   s    | ]	}t |tjV  qd S r&   )r   r   r   r   r*   r*   r+   r     s    c                 S   s   g | ]	}|j d ddqS )Tr8   r   )r   r   r*   r*   r+   r     s    r   )rP   contentr<   r   r^   rP   r   r   r*   )+logginginfor   r   r   r   r   pydanticValidationErrorrR   typer   bytesr   r'   rC   rf   r   anyFunctionResponser`   FunctionResponseDictre   appendLiveClientMessageDictLiveClientToolResponseDictr   	PartUnionrA   
t_contentsContentContentDictr   LiveClientContentDictLiveClientRealtimeInputDictr   LiveClientToolResponseBlobDictbase64	b64decodero   LiveClientContentr^   )r)   r-   r.   formatted_input
blob_inputfunction_responses_inputr   function_response_inputfunction_response_dictfunction_response_typeddictr:   content_input_partscontentscontent_dict_listcontent_inputcontent_turnsrP   tool_response_inputr\   formatted_media_chunksrG   function_response_listr*   r   r+   r6   +  sV  





 ` 




	 6  $ 

  
 	 
y


	H@
2
z"AsyncSession._parse_client_messagec                    s   | j  I d H  d S r&   )r(   closer)   r*   r*   r+   r   Y  s   zAsyncSession.close)NF)r/   N)*__name__
__module____qualname____doc__r   r   r,   r   r   r   ContentListUnionContentListUnionDictLiveClientContentOrDictLiveClientRealtimeInputOrDictLiveClientToolResponseOrDictFunctionResponseOrDictr   boolr7   r   r   rS   rH   BlobImageUnionDict
BlobOrDictr   ActivityStartOrDictActivityEndOrDictr]   ri   r   r   rm   r   r   rj   rs   rt   rv   r   r6   r   r*   r*   r*   r+   r#   S   s    
<

Z	

h
S
H

  0r#   c                	       sd   e Zd ZdZdef fddZedefddZe	j
dd	d
edeej dee fddZ  ZS )	AsyncLivez[Preview] AsyncLive.r$   c                    s   t  | t|| _d S r&   )superr,   r   _music)r)   r$   	__class__r*   r+   r,   a  s   zAsyncLive.__init__r/   c                 C   s   | j S r&   )r   r   r*   r*   r+   musice  s   zAsyncLive.musicN)configmodelr   c                C  s  t |trtjd'i |}|r|jrtd| j }t |tr&|	d}t
| j|}t| j|I dH }| jjr| jjs| jjj}| jj}d}d}	|dr\tjdtjd d	}d
}	| d| d| d|	 d| 	}
| jjj}ttj| jtj||djddd}|d= t|ddg| t|}n| jjr| jjr| jj}| jjj}| d| d}
| jjj}ttj | jtj||djddd}|d= t|ddg| t|}n| jj!st"j#j$dgd\}}n| jj!}|j%r|j&st"j#j'j() }|*| |j%}| jjj}|dur|+dd,|i | jjj}| d| d}
| jj-}| jj.}|dr;d| d| d| }ttj | jtj||djddd}|d= t/|g d du ret|g d d!g t|}|j0rt12|j0r|du r|i }t13| zBt4|
|d"4 I dH )}|5|I dH  t67|j8d#d$I dH  t9| j|d%V  W d  I dH  W dS 1 I dH sw   Y  W dS  t:y   t4|
|d&4 I dH '}|5|I dH  t67|8 I dH  t9| j|d%V  W d  I dH  Y dS 1 I dH sw   Y  Y dS w )(a  [Preview] Connect to the live server.

    Note: the live API is currently in preview.

    Usage:

    .. code-block:: python

      client = genai.Client(api_key=API_KEY)
      config = {}
      async with client.aio.live.connect(model='...', config=config) as session:
        await session.send_client_content(
          turns=types.Content(
            role='user',
            parts=[types.Part(text='hello!')]
          ),
          turn_complete=True
        )
        async for message in session.receive():
          print(message)

    Args:
      model: The model to use for the live session.
      config: The configuration for the live session.
      **kwargs: additional keyword arguments.

    Yields:
      An AsyncSession object.
    zgoogle.genai.client.aio.live.connect() does not support http_options at request-level in LiveConnectConfig yet. Please use the client-level http_options configuration instead.zutf-8NBidiGenerateContentkeyzauth_tokens/zUThe SDK's ephemeral token support is experimental, and may change in future versions.)messagecategoryBidiGenerateContentConstrainedaccess_tokenz!/ws/google.ai.generativelanguage.z.GenerativeService.?=)r   r   T)r   )r$   r?   r   setupr   z/ws/google.cloud.aiplatform.z#.LlmBidiService/BidiGenerateContentz.https://www.googleapis.com/auth/cloud-platform)scopesAuthorizationz	Bearer {}zpublishers/z	projects/z/locations//)r   generationConfigresponseModalitiesAUDIO)additional_headersFr   )r$   r%   )extra_headersr*   );r   r   r   LiveConnectConfighttp_optionsrR   r'   _websocket_base_urlr   r   rA   t_model_t_live_connect_configapi_keyrC   _http_optionsapi_version
startswithr3   r4   r   ExperimentalWarningheadersr   rY   rD   _LiveConnectParameters_to_mldevLiveConnectParametersr   setvr8   r9    _LiveConnectParameters_to_vertex_credentialsgoogleauthdefaulttokenvalid	transportrequestsRequestrefreshupdateformatlocationprojectgetvtoolsr   has_mcp_tool_usageset_mcp_usage_header
ws_connectr7   loggerr   r   r#   r   )r)   r   r   base_urltransformed_modelr   versionr  methodkey_nameurir  request_dictrequestcreds_auth_reqbearer_tokenr  r  wsr*   r*   r+   r   i  s   
%





 
	

	




	


44zAsyncLive.connect)r   r   r   r   r   r,   propertyr   r   
contextlibasynccontextmanagerr   r   r   LiveConnectConfigOrDictr   r#   r   __classcell__r*   r*   r   r+   r   ^  s    r   r$   r   r/   c                    s6  |d u r
t  }n;t|tr/t|dgd ur!tt|dg}nd }t jdi |}||_n|jd u r7d }n	tt|dg}|}||_|jdd id}|j	rg |_	|j	D ]5}t
d urst|t
rst|| I d H }|j	|j	 qVtd urt|tr|j	t| qV|j	| qV|jd urtjdtdd |S )Nsystem_instructionr  )r  zSetting `LiveConnectConfig.generation_config` is deprecated, please set the fields on `LiveConnectConfig` directly. This will become an error in a future version (not before Q3 2025)rp   r1   r*   )r   r  r   r   r  rA   	t_contentr6  
model_copyr  r!   r   
list_toolsextendr"   r   r    generation_configr3   r4   r5   )r$   r   r   converted_system_instructionr6  parameter_model_copytoolmcp_to_genai_tool_adapterr*   r*   r+   r    sF   





r  )Fr   rs   r   r2  r8   r   typingr   r   r   r   r   r   r   r3   google.authr  r   
websocketsr	    r   r   r   rD   r   r   rA   r   r   r   r'   r   r   r  r   r  
live_musicr   modelsr   r   websockets.asyncio.clientr   r   r"  ModuleNotFoundErrorwebsockets.clientTYPE_CHECKINGmcpr   r!   	mcp.typesr   r"   	_adaptersr   r    Type__annotations__ImportError	getLoggerr#  rf   r#   
BaseModuler   r4  r  r  r*   r*   r*   r+   <module>   s   $
       @