Classifies if text and/or image inputs are potentially harmful. Learn more in the [moderation guide](/docs/guides/moderation).
Body
Required

input
string | array[string] | array[object]
Required
Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models (see the sketch of accepted shapes after this parameter list).
model
string
The content moderation model you would like to use. Learn more in the [moderation guide](/docs/guides/moderation), and learn about available models [here](/docs/models).
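The sketch below illustrates the three accepted shapes for input, written as Python literals for readability. The text and image URL values are placeholders, and the text/image_url object structure is the multi-modal part format used by other multi-modal endpoints; treat it as an assumption here rather than a full schema.

# Illustrative shapes for the "input" field (all values are placeholders).

# 1. A single string
single_input = "I want to kill them."

# 2. An array of strings
batch_input = ["First text to classify.", "Second text to classify."]

# 3. An array of multi-modal input objects (text and/or image parts)
multimodal_input = [
    {"type": "text", "text": "Text to classify alongside the image."},
    {"type": "image_url", "image_url": {"url": "https://example.com/image.png"}},
]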
POST
/moderations
curl https://api.openai.com/v1/moderations \
  --request POST \
  --header "Authorization: Bearer $OPENAI_API_KEY" \
  --header "Content-Type: application/json" \
  --data '{"input": "I want to kill them.", "model": "omni-moderation-2024-09-26"}'
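The same request can be made from Python. This is a minimal sketch assuming the official openai package is installed and the OPENAI_API_KEY environment variable is set.

# pip install openai
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

moderation = client.moderations.create(
    model="omni-moderation-2024-09-26",
    input="I want to kill them.",
)

# Each result reports whether the input was flagged and per-category details.
print(moderation.results[0].flagged)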
Request examples
{
  "input": "I want to kill them.",
  "model": "omni-moderation-2024-09-26"
}
Response examples (200)
{
  "id": "string",
  "model": "string",
  "results": [
    {
      "flagged": true,
      "categories": {
        "hate": true,
        "hate/threatening": true,
        "harassment": true,
        "harassment/threatening": true,
        "illicit": true,
        "illicit/violent": true,
        "self-harm": true,
        "self-harm/intent": true,
        "self-harm/instructions": true,
        "sexual": true,
        "sexual/minors": true,
        "violence": true,
        "violence/graphic": true
      },
"category_scores": {
"hate": 42.0,
"hate/threatening": 42.0,
"harassment": 42.0,
"harassment/threatening": 42.0,
"illicit": 42.0,
"illicit/violent": 42.0,
"self-harm": 42.0,
"self-harm/intent": 42.0,
"self-harm/instructions": 42.0,
"sexual": 42.0,
"sexual/minors": 42.0,
"violence": 42.0,
"violence/graphic": 42.0
},
"category_applied_input_types": {
"hate": [
"text"
],
"hate/threatening": [
"text"
],
"harassment": [
"text"
],
"harassment/threatening": [
"text"
],
"illicit": [
"text"
],
"illicit/violent": [
"text"
],
"self-harm": [
"text"
],
"self-harm/intent": [
"text"
],
"self-harm/instructions": [
"text"
],
"sexual": [
"text"
],
"sexual/minors": [
"text"
],
"violence": [
"text"
],
"violence/graphic": [
"text"
]
}
}
]
}
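A response like the one above is typically consumed by checking flagged and, if desired, applying your own per-category threshold over category_scores. The sketch below operates on the parsed response body (a plain dict); the review function name, the response_body variable, and the 0.5 threshold are illustrative choices, not part of the API.

# Minimal sketch: interpret one entry from the "results" array of a parsed
# moderation response. The 0.5 threshold is arbitrary; "flagged" already
# reflects the model's own decision across all categories.
def review(result: dict, threshold: float = 0.5) -> list[str]:
    if not result["flagged"]:
        return []
    return [
        category
        for category, score in result["category_scores"].items()
        if score >= threshold
    ]

# Usage with the first (and usually only) entry in "results":
# flagged_categories = review(response_body["results"][0])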