choonkeat / elm-openai / OpenAI.Chat

https://platform.openai.com/docs/api-reference/chat/create

Given a chat conversation, the model will return a chat completion response.

create : Input -> Ext.Http.TaskInput (Ext.Http.Error String) Output

OpenAI.Chat.create
    { model = GPT3_5_Turbo
    , messages =
        [ ChatMessage SystemRole
            "You are an AI assistant whose goal is to promote the Elm programming language."
        , ChatMessage UserRole
            "What is the best way to learn Elm?"
        ]
    , temperature = Nothing
    , top_p = Nothing
    , n = Nothing
    , stream = Nothing
    , stop = Nothing
    , max_tokens = Nothing
    , presence_penalty = Nothing
    , frequency_penalty = Nothing
    , logit_bias = Nothing
    , user = Nothing
    }
    |> OpenAI.withConfig cfg
    |> Http.task
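
The pipeline above produces a Task value, so it still has to be attempted to become a Cmd. A minimal sketch, assuming an illustrative GotChatCompletion message variant and whatever cfg value you already pass to OpenAI.withConfig:

import Http
import Task

type Msg
    = GotChatCompletion (Result (Ext.Http.Error String) OpenAI.Chat.Output)

-- Turn the chat completion task into a command; the Result arrives in update
sendChat cfg input =
    OpenAI.Chat.create input
        |> OpenAI.withConfig cfg
        |> Http.task
        |> Task.attempt GotChatCompletion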


type alias Input =
    { model : ModelID
    , messages : List ChatMessage
    , temperature : Maybe Basics.Float
    , top_p : Maybe Basics.Float
    , n : Maybe Basics.Int
    , stream : Maybe Basics.Bool
    , stop : Maybe (List String)
    , max_tokens : Maybe Basics.Int
    , presence_penalty : Maybe Basics.Float
    , frequency_penalty : Maybe Basics.Float
    , logit_bias : Maybe (Dict String Basics.Int)
    , user : Maybe String
    }

See https://platform.openai.com/docs/api-reference/chat/create

If stream is set to True, the response will stream back partial progress: tokens are sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Streaming is currently unsupported by this library.
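
Since every tuning field is optional, a convenient pattern is to keep a base Input with everything set to Nothing and override individual fields with record update syntax. A sketch, where defaultInput is a hypothetical helper (not part of this package):

defaultInput : Input
defaultInput =
    { model = GPT3_5_Turbo
    , messages = []
    , temperature = Nothing
    , top_p = Nothing
    , n = Nothing
    , stream = Nothing
    , stop = Nothing
    , max_tokens = Nothing
    , presence_penalty = Nothing
    , frequency_penalty = Nothing
    , logit_bias = Nothing
    , user = Nothing
    }

-- Lower temperature and cap output for a more deterministic reply
preciseInput : List ChatMessage -> Input
preciseInput messages =
    { defaultInput
        | messages = messages
        , temperature = Just 0.2
        , max_tokens = Just 256
    }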


type alias ChatMessage =
    { role : ChatMessageRole
    , content : String
    }


type ChatMessageRole
    = SystemRole
    | UserRole
    | AssistantRole
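
AssistantRole marks messages that came from the model itself, which is how earlier replies are fed back in to continue a multi-turn conversation. A sketch of assembling the next request's message history (names are illustrative):

-- Extend the conversation with the model's reply and the user's follow-up
continueConversation : List ChatMessage -> ChatMessage -> String -> List ChatMessage
continueConversation history assistantReply followUp =
    history ++ [ assistantReply, ChatMessage UserRole followUp ]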


type ModelID
    = GPT3_5_Turbo
    | GPT3_5_Turbo_0301


type alias Output =
    { id : String
    , object : String
    , created : Time.Posix
    , choices : List Choice
    , usage : OpenAI.Common.Usage
    }


type alias Choice =
    { index : Basics.Int
    , message : ChatMessage
    , finish_reason : Maybe String
    }
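
To read the assistant's reply out of an Output, take the first Choice and its message content. A minimal sketch:

-- Text of the first choice's message, if the API returned any choices
replyText : Output -> Maybe String
replyText output =
    output.choices
        |> List.head
        |> Maybe.map (.message >> .content)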