clients.openai.moderation #
OpenAI Moderation Client
This directory contains the V client for OpenAI's Moderation API.
import incubaid.herolib.clients.openai
mut client := openai.get()! // default client; the API key is read from the `AIKEY` or `OPENROUTER_API_KEY` environment variable
text_to_moderate := 'I want to kill them all.'
resp := client.moderation.create_moderation(
	input: text_to_moderate
)!
if resp.results.len > 0 {
	if resp.results[0].flagged {
		println('Text was flagged for moderation.')
		println('Categories: ${resp.results[0].categories}')
	} else {
		println('Text passed moderation.')
	}
} else {
	eprintln('Failed to get moderation result.')
}
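The `results` array holds one `ModerationResult` per input item; since a single string is passed here, only `results[0]` needs to be inspected.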
fn (OpenAIAlias) create_moderation #
fn (mut f OpenAIAlias) create_moderation(input string, model ModerationModel) !ModerationResponse
enum ModerationModel #
enum ModerationModel {
	text_moderation_latest
	text_moderation_stable
}
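The model can also be chosen explicitly. A minimal sketch, assuming the named-parameter call style from the example above and that `model` accepts a `ModerationModel` value as in the signature:

import incubaid.herolib.clients.openai

mut client := openai.get()!

// assumption: an explicit model is selected via the `model` named parameter
resp := client.moderation.create_moderation(
	input: 'some text to check'
	model: .text_moderation_stable
)!
println('flagged: ${resp.results[0].flagged}')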
struct ModerationRequest #
@[params]
struct ModerationRequest {
mut:
	input string
	model string
}
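The `@[params]` attribute enables V's named-argument syntax for this struct, which is what allows the `input: ...` call style in the example above; fields left unset keep their zero values.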
struct ModerationResponse #
struct ModerationResponse {
pub mut:
	id      string
	model   string
	results []ModerationResult
}
struct ModerationResult #
struct ModerationResult {
pub mut:
	categories      ModerationResultCategories
	category_scores ModerationResultCategoryScores
	flagged         bool
}
struct ModerationResultCategories #
struct ModerationResultCategories {
pub mut:
	sexual                 bool
	hate                   bool
	harassment             bool
	selfharm               bool @[json: 'self-harm']
	sexual_minors          bool @[json: 'sexual/minors']
	hate_threatening       bool @[json: 'hate/threatening']
	violence_graphic       bool @[json: 'violence/graphic']
	selfharm_intent        bool @[json: 'self-harm/intent']
	selfharm_instructions  bool @[json: 'self-harm/instructions']
	harassment_threatening bool @[json: 'harassment/threatening']
	violence               bool
}
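The boolean flags allow a policy to react to specific categories rather than the overall `flagged` verdict. A minimal sketch, assuming a `resp` obtained as in the example at the top:

result := resp.results[0]
if result.categories.violence || result.categories.hate_threatening {
	println('blocked: violent or threatening content')
} else if result.flagged {
	println('flagged by OpenAI, but allowed by this policy')
}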
struct ModerationResultCategoryScores #
struct ModerationResultCategoryScores {
pub mut:
	sexual                 f32
	hate                   f32
	harassment             f32
	selfharm               f32 @[json: 'self-harm']
	sexual_minors          f32 @[json: 'sexual/minors']
	hate_threatening       f32 @[json: 'hate/threatening']
	violence_graphic       f32 @[json: 'violence/graphic']
	selfharm_intent        f32 @[json: 'self-harm/intent']
	selfharm_instructions  f32 @[json: 'self-harm/instructions']
	harassment_threatening f32 @[json: 'harassment/threatening']
	violence               f32
}
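The raw scores make it possible to apply stricter or looser thresholds than OpenAI's own `flagged` decision. A minimal sketch, again assuming a `resp` from the earlier example; the 0.5 cut-off is purely illustrative:

scores := resp.results[0].category_scores
threshold := f32(0.5) // illustrative cut-off, not an OpenAI recommendation
if scores.violence > threshold || scores.harassment_threatening > threshold {
	println('rejected by custom threshold')
} else {
	println('accepted by custom threshold')
}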