
    zvh(                    D   d dl mZ d dlmZ d dlmZ d dlZddlmZ ddl	m
Z
mZmZmZmZ ddlmZmZ dd	lmZ dd
lmZmZ ddlmZmZmZmZ ddlmZ ddlmZ ddl m!Z! ddgZ" G d de      Z# G d de      Z$ G d d      Z% G d d      Z& G d d      Z' G d d      Z(y)    )annotations)Union)LiteralN   )_legacy_response)	NOT_GIVENBodyQueryHeadersNotGiven)maybe_transformasync_maybe_transform)cached_property)SyncAPIResourceAsyncAPIResource)StreamedBinaryAPIResponseAsyncStreamedBinaryAPIResponse#to_custom_streamed_response_wrapper)async_to_custom_streamed_response_wrapper)speech_create_params)make_request_options)SpeechModelSpeechAsyncSpeechc            	      z    e Zd Zedd       Zedd       Zeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddZy)	r   c                    t        |       S a  
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        )SpeechWithRawResponseselfs    U/var/www/html/luna/venv/lib/python3.12/site-packages/openai/resources/audio/speech.pywith_raw_responsezSpeech.with_raw_response   s     %T**    c                    t        |       S z
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        )SpeechWithStreamingResponser   s    r!   with_streaming_responsezSpeech.with_streaming_response'   s     +400r#   Ninstructionsresponse_formatspeedstream_formatextra_headersextra_query
extra_bodytimeoutc                   ddi|xs i }| j                  dt        |||||||dt        j                        t	        ||	|
|      t
        j                        S )  
        Generates audio from the input text.

        Args:
          input: The text to generate audio for. The maximum length is 4096 characters.

          model:
              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.

          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
              `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
              `verse`. Previews of the voices are available in the
              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).

          instructions: Control the voice of your generated audio with additional instructions. Does not
              work with `tts-1` or `tts-1-hd`.

          response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
              `wav`, and `pcm`.

          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
              the default.

          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
              `sse` is not supported for `tts-1` or `tts-1-hd`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        Acceptapplication/octet-stream/audio/speechinputmodelvoicer)   r*   r+   r,   r-   r.   r/   r0   bodyoptionscast_to)_postr   r   SpeechCreateParamsr   r   HttpxBinaryResponseContentr    r7   r8   r9   r)   r*   r+   r,   r-   r.   r/   r0   s               r!   createzSpeech.create0   s    l "#=W-BUSUWzz """$0'6"%2 %77 )+Q[el %??#  
 	
r#   )returnr   )rD   r&   r7   strr8   zUnion[str, SpeechModel]r9   zsUnion[str, Literal['alloy', 'ash', 'ballad', 'coral', 'echo', 'fable', 'onyx', 'nova', 'sage', 'shimmer', 'verse']]r)   zstr | NotGivenr*   z>Literal['mp3', 'opus', 'aac', 'flac', 'wav', 'pcm'] | NotGivenr+   zfloat | NotGivenr,   z"Literal['sse', 'audio'] | NotGivenr-   zHeaders | Noner.   zQuery | Noner/   zBody | Noner0   z'float | httpx.Timeout | None | NotGivenrD   z+_legacy_response.HttpxBinaryResponseContent__name__
__module____qualname__r   r"   r'   r   rC    r#   r!   r   r      s    + + 1 1  (1Zc"+<E )-$("&;D#I
 I
 '	I


I
 %I
 XI
  I
 :I
 &I
 "I
   !I
" 9#I
$ 
5%I
r#   c            	      z    e Zd Zedd       Zedd       Zeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddZy)	r   c                    t        |       S r   )AsyncSpeechWithRawResponser   s    r!   r"   zAsyncSpeech.with_raw_response}   s     *$//r#   c                    t        |       S r%   ) AsyncSpeechWithStreamingResponser   s    r!   r'   z#AsyncSpeech.with_streaming_response   s     055r#   Nr(   c                  K   ddi|xs i }| j                  dt        |||||||dt        j                         d{   t	        ||	|
|      t
        j                         d{   S 7 .7 w)r2   r3   r4   r5   r6   Nr:   r;   )r?   r   r   r@   r   r   rA   rB   s               r!   rC   zAsyncSpeech.create   s     l "#=W-BUSUWZZ,"""$0'6"%2 %77  )+Q[el %??#   
 
 	

s!   <A1A-
)A1(A/)A1/A1)rD   rN   )rD   rP   rE   rG   rK   r#   r!   r   r   |   s    0 0 6 6  (1Zc"+<E )-$("&;D#I
 I
 '	I


I
 %I
 XI
  I
 :I
 &I
 "I
   !I
" 9#I
$ 
5%I
r#   c                      e Zd ZddZy)r   c                Z    || _         t        j                  |j                        | _        y N)_speechr   to_raw_response_wrapperrC   r    speechs     r!   __init__zSpeechWithRawResponse.__init__   s"    &>>MM
r#   NrX   r   rD   NonerH   rI   rJ   rY   rK   r#   r!   r   r          
r#   r   c                      e Zd ZddZy)rN   c                Z    || _         t        j                  |j                        | _        y rT   )rU   r   async_to_raw_response_wrapperrC   rW   s     r!   rY   z#AsyncSpeechWithRawResponse.__init__   s"    &DDMM
r#   NrX   r   rD   r[   r\   rK   r#   r!   rN   rN      r]   r#   rN   c                      e Zd ZddZy)r&   c                P    || _         t        |j                  t              | _        y rT   )rU   r   rC   r   rW   s     r!   rY   z$SpeechWithStreamingResponse.__init__   s    9MM%
r#   NrZ   r\   rK   r#   r!   r&   r&          
r#   r&   c                      e Zd ZddZy)rP   c                P    || _         t        |j                  t              | _        y rT   )rU   r   rC   r   rW   s     r!   rY   z)AsyncSpeechWithStreamingResponse.__init__   s    ?MM*
r#   Nra   r\   rK   r#   r!   rP   rP      rd   r#   rP   ))
__future__r   typingr   typing_extensionsr   httpx r   _typesr   r	   r
   r   r   _utilsr   r   _compatr   	_resourcer   r   	_responser   r   r   r   types.audior   _base_clientr   types.audio.speech_modelr   __all__r   r   r   rN   r&   rP   rK   r#   r!   <module>ru      s    #  %    ? ? < & :  0 0 3]
#]
_ ]
@]
" ]
@
 

 

 

 
r#   
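

# ---------------------------------------------------------------------------
# Illustrative usage sketch. This is a hedged example rather than part of the
# resource module itself: it assumes the public `openai` package entry point,
# an `OPENAI_API_KEY` set in the environment, and example values for the
# model, voice, input text, and output paths. It is kept as comments so that
# importing this module stays side-effect free.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     # Buffered call: the full audio body is read into memory and the
#     # HttpxBinaryResponseContent helper then writes it out to disk.
#     audio = client.audio.speech.create(
#         model="gpt-4o-mini-tts",
#         voice="alloy",
#         input="Hello from the speech endpoint.",
#     )
#     audio.write_to_file("speech.mp3")
#
#     # Streaming variant: with_streaming_response wraps the same `create`
#     # call (see SpeechWithStreamingResponse above) and writes chunks to
#     # disk as they arrive instead of buffering the whole response.
#     with client.audio.speech.with_streaming_response.create(
#         model="gpt-4o-mini-tts",
#         voice="alloy",
#         input="Hello again, streamed this time.",
#     ) as response:
#         response.stream_to_file("speech_streamed.mp3")
# ---------------------------------------------------------------------------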