choonkeat / elm-openai / OpenAI.Audio

https://platform.openai.com/docs/api-reference/audio

createTranscription : TranscriptionInput -> Ext.Http.TaskInput (Ext.Http.Error String) Output

https://platform.openai.com/docs/api-reference/audio/create

createTranslation : TranslationInput -> Ext.Http.TaskInput (Ext.Http.Error String) Output

https://platform.openai.com/docs/api-reference/audio/createTranslation


{-| Model usable for the audio endpoints.
Only Whisper (`whisper-1`) is represented here.
-}
type Model
    = Whisper_1


{-| Result of `createTranscription` / `createTranslation`:
the transcribed or translated text.
-}
type alias Output =
    { text : String }


{-| Format of the transcript output.

NOTE(review): constructor names suggest the API values `json`,
`verbose_json`, `text`, `srt`, `vtt` — the encoder is not visible
from here, so confirm against the request serializer.
-}
type ResponseFormat
    = JsonFormat
    | VerboseJson
    | TextFormat
    | SrtFormat
    | VttFormat


{-| Request payload for `createTranscription`.

  - `file` — the audio file to transcribe, in one of these formats:
    mp3, mp4, mpeg, mpga, m4a, wav, or webm
  - `prompt` — optional text to guide the model's style or continue a
    previous audio segment; should match the audio language
  - `language` — language of the input audio in ISO-639-1 format;
    supplying it improves accuracy and latency

-}
type alias TranscriptionInput =
    { file : File
    , model : Model
    , prompt : Maybe String
    , response_format : Maybe ResponseFormat
    , temperature : Maybe Basics.Float
    , language : Maybe String
    }

file: The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.

prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.

language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.


{-| Request payload for `createTranslation`.

Same shape as `TranscriptionInput` minus `language`: the output of a
translation request is always English, so no target language is given.

  - `prompt` — optional text to guide the model's style or continue a
    previous audio segment

-}
type alias TranslationInput =
    { file : File
    , model : Model
    , prompt : Maybe String
    , response_format : Maybe ResponseFormat
    , temperature : Maybe Basics.Float
    }