Skip to main content

AI Guard | Golang SDK

Text guard

func (e *aiGuard) GuardText(ctx context.Context, input *TextGuardRequest) (*pangea.PangeaResponse[TextGuardResult], error)

Guard text.

context.Context
input := &ai_guard.TextGuardRequest{Text: "hello world"}
response, err := client.GuardText(ctx, input)

Type AnalyzerResponse

type AnalyzerResponse struct

string `json:"analyzer"`
float64 `json:"confidence"`
type AnalyzerResponse struct {
	Analyzer	string	`json:"analyzer"`
	Confidence	float64	`json:"confidence"`
}

Type Client

type Client interface

AI Guard API client.

func(ctx context.Context, input *TextGuardRequest) (*pangea.PangeaResponse[TextGuardResult], error)
context.Context
pangea.BaseServicer

Base service methods.

type Client interface {
	GuardText(ctx context.Context, input *TextGuardRequest) (*pangea.PangeaResponse[TextGuardResult], error)

	// Base service methods.
	pangea.BaseServicer
}

Type CodeDetectionResult

type CodeDetectionResult struct

string `json:"language"`
string `json:"action"`
type CodeDetectionResult struct {
	Language	string	`json:"language"`
	Action		string	`json:"action"`
}

Type LanguageDetectionResult

type LanguageDetectionResult struct

string `json:"language"`
string `json:"action"`
type LanguageDetectionResult struct {
	Language	string	`json:"language"`
	Action		string	`json:"action"`
}

Type LogFields

type LogFields struct

LogFields are additional fields to include in activity log

string `json:"citations,omitempty"`
string `json:"extra_info,omitempty"`
string `json:"model,omitempty"`
string `json:"source,omitempty"`
string `json:"tools,omitempty"`
type LogFields struct {
	Citations	string	`json:"citations,omitempty"`	// Origin or source application of the event
	ExtraInfo	string	`json:"extra_info,omitempty"`	// Stores supplementary details related to the event
	Model		string	`json:"model,omitempty"`	// Model used to perform the event
	Source		string	`json:"source,omitempty"`	// IP address of user or app or agent
	Tools		string	`json:"tools,omitempty"`	// Tools used to perform the event
}

Type MaliciousEntity

type MaliciousEntity struct

string `json:"type"`
string `json:"value"`
string `json:"action"`
*int `json:"start_pos,omitempty"`
map[string]interface{} `json:"raw,omitempty"`
type MaliciousEntity struct {
	Type		string			`json:"type"`
	Value		string			`json:"value"`
	Action		string			`json:"action"`
	StartPos	*int			`json:"start_pos,omitempty"`
	Raw		map[string]interface{}	`json:"raw,omitempty"`
}

Type MaliciousEntityResult

type MaliciousEntityResult struct

[]MaliciousEntity `json:"entities"`
type MaliciousEntityResult struct {
	Entities []MaliciousEntity `json:"entities"`
}

Type PiiEntity

type PiiEntity struct

string `json:"type"`
string `json:"value"`
string `json:"action"`
*int `json:"start_pos,omitempty"`
type PiiEntity struct {
	Type		string	`json:"type"`
	Value		string	`json:"value"`
	Action		string	`json:"action"`
	StartPos	*int	`json:"start_pos,omitempty"`
}

Type PiiEntityResult

type PiiEntityResult struct

[]PiiEntity `json:"entities"`
type PiiEntityResult struct {
	Entities []PiiEntity `json:"entities"`
}

Type PromptInjectionResult

type PromptInjectionResult struct

string `json:"action"`
type PromptInjectionResult struct {
	Action			string			`json:"action"`
	AnalyzerResponses	[]AnalyzerResponse	`json:"analyzer_responses"`
}

Type SecretsEntity

type SecretsEntity struct

string `json:"type"`
string `json:"value"`
string `json:"action"`
*int `json:"start_pos,omitempty"`
string `json:"redacted_value,omitempty"`
type SecretsEntity struct {
	Type		string	`json:"type"`
	Value		string	`json:"value"`
	Action		string	`json:"action"`
	StartPos	*int	`json:"start_pos,omitempty"`
	RedactedValue	string	`json:"redacted_value,omitempty"`
}

Type SecretsEntityResult

type SecretsEntityResult struct

[]SecretsEntity `json:"entities"`
type SecretsEntityResult struct {
	Entities []SecretsEntity `json:"entities"`
}

Type TextGuardDetector

type TextGuardDetector[T any] struct

any
bool `json:"detected"`
*T `json:"data,omitempty"`
type TextGuardDetector[T any] struct {
	Detected	bool	`json:"detected"`
	Data		*T	`json:"data,omitempty"`
}

Type TextGuardDetectors

type TextGuardDetectors struct

*TextGuardDetector[PromptInjectionResult] `json:"prompt_injection,omitempty"`
*TextGuardDetector[PiiEntityResult] `json:"pii_entity,omitempty"`
*TextGuardDetector[MaliciousEntityResult] `json:"malicious_entity,omitempty"`
*TextGuardDetector[SecretsEntityResult] `json:"secrets_detection,omitempty"`
*TextGuardDetector[any] `json:"profanity_and_toxicity,omitempty"`
*TextGuardDetector[any] `json:"custom_entity,omitempty"`
*TextGuardDetector[LanguageDetectionResult] `json:"language_detection,omitempty"`
*TextGuardDetector[CodeDetectionResult] `json:"code_detection,omitempty"`
type TextGuardDetectors struct {
	PromptInjection		*TextGuardDetector[PromptInjectionResult]	`json:"prompt_injection,omitempty"`
	PiiEntity		*TextGuardDetector[PiiEntityResult]		`json:"pii_entity,omitempty"`
	MaliciousEntity		*TextGuardDetector[MaliciousEntityResult]	`json:"malicious_entity,omitempty"`
	SecretsDetection	*TextGuardDetector[SecretsEntityResult]		`json:"secrets_detection,omitempty"`
	ProfanityAndToxicity	*TextGuardDetector[any]				`json:"profanity_and_toxicity,omitempty"`
	CustomEntity		*TextGuardDetector[any]				`json:"custom_entity,omitempty"`
	LanguageDetection	*TextGuardDetector[LanguageDetectionResult]	`json:"language_detection,omitempty"`
	CodeDetection		*TextGuardDetector[CodeDetectionResult]		`json:"code_detection,omitempty"`
}

Type TextGuardRequest

type TextGuardRequest struct

string `json:"text,omitempty"`
any `json:"messages,omitempty"`
any `json:"llm_input,omitempty"`
string `json:"recipe,omitempty"`
bool `json:"debug,omitempty"`
string `json:"llm_info,omitempty"`
type TextGuardRequest struct {
	pangea.BaseRequest

	Text		string		`json:"text,omitempty"`		// Text to be scanned by AI Guard for PII, sensitive data, malicious content, and other data types defined by the configuration. Supports processing up to 10KB of text.
	Messages	any		`json:"messages,omitempty"`	// Structured messages data to be scanned by AI Guard for PII, sensitive data, malicious content, and other data types defined by the configuration. Supports processing up to 10KB of JSON text.
	LlmInput	any		`json:"llm_input,omitempty"`	// Structured full llm payload data to be scanned by AI Guard for PII, sensitive data, malicious content, and other data types defined by the configuration. Supports processing up to 10KB of JSON text.
	Recipe		string		`json:"recipe,omitempty"`	// Recipe key of a configuration of data types and settings defined in the Pangea User Console. It specifies the rules that are to be applied to the text, such as defang malicious URLs.
	Debug		bool		`json:"debug,omitempty"`	// Setting this value to true will provide a detailed analysis of the text data
	LlmInfo		string		`json:"llm_info,omitempty"`	// Short string hint for the LLM Provider information
	LogFields	LogFields	`json:"log_fields,omitempty"`	// Additional fields to include in activity log
}

Type TextGuardResult

type TextGuardResult struct

TextGuardDetectors `json:"detectors"`
string `json:"prompt_text"`
any `json:"prompt_messages"`
bool `json:"blocked"`
type TextGuardResult struct {
	Detectors	TextGuardDetectors	`json:"detectors"`		// Result of the recipe analyzing and input prompt.
	PromptText	string			`json:"prompt_text"`		// Updated prompt text, if applicable.
	PromptMessages	any			`json:"prompt_messages"`	// Updated prompt messages, if applicable.
	Blocked		bool			`json:"blocked"`
}

Type aiGuard

type aiGuard struct
type aiGuard struct {
	pangea.BaseService
}