choonkeat / elm-openai / OpenAI.Completion

Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.

See https://platform.openai.com/docs/api-reference/completions

create : Input -> Ext.Http.TaskInput (Ext.Http.Error String) Output

https://platform.openai.com/docs/api-reference/completions/create

create
    { model = OpenAI.ModelID.TextDavinci003
    , prompt = "If I were a farmer's son"
    , suffix = Nothing
    , max_tokens = Nothing
    , temperature = Nothing
    , top_p = Nothing
    , n = Nothing
    , stream = Nothing
    , logprobs = Nothing
    , echo = Nothing
    , stop = Nothing
    , presence_penalty = Nothing
    , frequency_penalty = Nothing
    , best_of = Nothing
    , logit_bias = Nothing
    , user = Nothing
    }
    |> OpenAI.withConfig cfg
    |> Http.task
-- > Task.succeed
-- >     { choices =
-- >         [ { finish_reason = "length"
-- >           , index = 0
-- >           , logprobs = Nothing
-- >           , text = "\n\nIf I were a farmer's son, I would rise early each morning"
-- >           }
-- >         ]
-- >     , created = Posix ...
-- >     , id = "cmpl-..."
-- >     , model = TextDavinci003
-- >     , object = "text_completion"
-- >     , usage =
-- >         { completion_tokens = 16
-- >         , prompt_tokens = 7
-- >         , total_tokens = 23
-- >         }
-- >     }


{-| Request body for `create`.

Field names mirror the JSON parameters of the OpenAI completions API;
optional parameters are `Maybe` and are omitted when `Nothing`.
Note: `stream = Just True` requests server-sent events, which this
library does not support (see note below).

-}
type alias Input =
    { model : OpenAI.ModelID.ModelID
    , prompt : String
    , suffix : Maybe String
    , max_tokens : Maybe Basics.Int
    , temperature : Maybe Basics.Float
    , top_p : Maybe Basics.Float
    , n : Maybe Basics.Int
    , stream : Maybe Basics.Bool
    , logprobs : Maybe Basics.Int
    , echo : Maybe Basics.Bool
    , stop : Maybe (List String)
    , presence_penalty : Maybe Basics.Float
    , frequency_penalty : Maybe Basics.Float
    , best_of : Maybe Basics.Int
    , logit_bias : Maybe (Dict String Basics.Int)
    , user : Maybe String
    }

If stream is set to True, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message. Streaming is currently unsupported by this library.


{-| Successful response from the completions API.

Contains one `Choice` per requested completion (`n`), plus token
accounting in `usage`.

-}
type alias Output =
    { id : String
    , object : String
    , created : Time.Posix
    , model : OpenAI.ModelID.ModelID
    , choices : List Choice
    , usage : OpenAI.Common.Usage
    }


{-| A single generated completion.

`logprobs` is populated only when requested in `Input`;
`finish_reason` reports why generation stopped (e.g. `"length"`).

-}
type alias Choice =
    { text : String
    , index : Basics.Int
    , logprobs : Maybe Logprob
    , finish_reason : Maybe String
    }


{-| Per-token log-probability details, returned when `Input.logprobs`
is set. The lists are parallel: entry `i` of each list describes the
same generated token.

-}
type alias Logprob =
    { tokens : List String
    , token_logprobs : List Basics.Float
    , top_logprobs : Dict String Basics.Float
    , text_offset : List Basics.Int
    }