The moderation endpoint checks whether content complies with OpenAI's content policy. Developers can use it to identify content the policy prohibits and take action, for instance by filtering it out.
See https://beta.openai.com/docs/guides/moderation/overview
create : Input -> Ext.Http.TaskInput (Ext.Http.Error String) Output
See https://beta.openai.com/docs/api-reference/moderations/create
create
    { input = "I ** you"
    , model = Just TextModerationLatest
    }
    |> OpenAI.withConfig cfg
    |> Http.task
-- > Task.succeed
-- > { id = "modr-6a5SyUXa0D954a3h9hHi737O8vtM3"
-- > , model = Custom "text-moderation-004"
-- > , results =
-- > [ { categories = { hate = False, hate_threatening = False, self_harm = False, sexual = False, sexual_minors = False, violence = False, violence_graphic = False }
-- > , category_scores = { hate = 0.000005618692284770077, hate_threatening = 7.34394856038989e-9, self_harm = 1.334657184770549e-7, sexual = 0.001665698830038309, sexual_minors = 7.969669013618841e-7, violence = 0.000050713490054477006, violence_graphic = 4.190181073226995e-7 }
-- > , flagged = False
-- > }
-- > ]
-- > }
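
For orientation, here is one way the task above might be wired into an application's update loop. This is a sketch, not part of this module: the Msg type, the moderate helper, and the cfg value are assumptions, and Http, Task, and OpenAI are presumed imported.

-- Hypothetical wiring (assumes: import Http, import Task, import OpenAI)
type Msg
    = GotModeration (Result (Ext.Http.Error String) Output)

-- `cfg` is the same config value used with OpenAI.withConfig above
moderate cfg userText =
    create
        { input = userText
        , model = Just TextModerationLatest
        }
        |> OpenAI.withConfig cfg
        |> Http.task
        |> Task.attempt GotModeration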
type alias Input =
    { input : String
    , model : Maybe Model
    }
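
If model is Nothing, model selection is presumably left to the API, whose documented default is the latest text moderation model. A minimal value might look like this; the defaultInput name is purely illustrative.

-- Minimal input: let the API choose its default moderation model.
defaultInput : Input
defaultInput =
    { input = "some user-generated text"
    , model = Nothing
    }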
type alias Output =
    { id : String
    , model : OpenAI.ModelID.ModelID
    , results : List Moderation
    }
type alias Moderation =
    { categories : Category
    , category_scores : CategoryScore
    , flagged : Basics.Bool
    }
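
Since each Moderation carries a precomputed flagged verdict, checking a whole response reduces to a list scan. A hypothetical helper, not part of this module:

-- Hypothetical helper: True if any result in the output was flagged.
anyFlagged : Output -> Bool
anyFlagged output =
    List.any .flagged output.results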
type alias Category =
    { hate : Basics.Bool
    , hate_threatening : Basics.Bool
    , self_harm : Basics.Bool
    , sexual : Basics.Bool
    , sexual_minors : Basics.Bool
    , violence : Basics.Bool
    , violence_graphic : Basics.Bool
    }
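
When content is flagged, it is often useful to report which categories triggered. A hypothetical helper over Category; the string labels mirror the category names in OpenAI's API responses:

-- Hypothetical helper: names of the categories set to True.
violatedCategories : Category -> List String
violatedCategories c =
    [ ( "hate", c.hate )
    , ( "hate/threatening", c.hate_threatening )
    , ( "self-harm", c.self_harm )
    , ( "sexual", c.sexual )
    , ( "sexual/minors", c.sexual_minors )
    , ( "violence", c.violence )
    , ( "violence/graphic", c.violence_graphic )
    ]
        |> List.filter Tuple.second
        |> List.map Tuple.first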
type alias CategoryScore =
    { hate : Basics.Float
    , hate_threatening : Basics.Float
    , self_harm : Basics.Float
    , sexual : Basics.Float
    , sexual_minors : Basics.Float
    , violence : Basics.Float
    , violence_graphic : Basics.Float
    }
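
The raw probabilities in CategoryScore make it possible to apply a stricter (or looser) policy than the boolean flagged verdict. A hypothetical helper applying one uniform threshold to every category:

-- Hypothetical helper: True if any category score meets the threshold.
exceedsThreshold : Float -> CategoryScore -> Bool
exceedsThreshold threshold s =
    List.any (\score -> score >= threshold)
        [ s.hate
        , s.hate_threatening
        , s.self_harm
        , s.sexual
        , s.sexual_minors
        , s.violence
        , s.violence_graphic
        ]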