Python API
Complete Python interface for profanity detection with cross-language parity
The Python package provides 100% feature parity with the JavaScript library, using Pythonic naming conventions (snake_case) while keeping functionality and behavior identical.
Filter Class
The main Python interface using object-oriented design with persistent configuration.
Constructor
def __init__(self, config: FilterConfig | None = None) -> None:
Example:
from glin_profanity import Filter
# Basic filter with default configuration
filter_instance = Filter()
# Advanced filter with custom configuration
advanced_filter = Filter({
    "languages": ["english", "spanish"],
    "enable_context_aware": True,
    "context_window": 5,
    "confidence_threshold": 0.8
})
constructor(config?: FilterConfig)
Example:
import { Filter } from 'glin-profanity';
// Basic filter with default configuration
const filter = new Filter();
// Advanced filter with custom configuration
const advancedFilter = new Filter({
  languages: ['english', 'spanish'],
  enableContextAware: true,
  contextWindow: 5,
  confidenceThreshold: 0.8
});
FilterConfig Type Definition
| Prop | Type | Default |
|---|---|---|
| languages? | list[Language] \| None | None |
| all_languages? | bool | False |
| case_sensitive? | bool | False |
| word_boundaries? | bool | True |
| allow_obfuscated_match? | bool | False |
| fuzzy_tolerance_level? | float | 0.8 |
| custom_words? | list[str] \| None | None |
| ignore_words? | list[str] \| None | globalWhitelist |
| replace_with? | str \| None | None |
| severity_levels? | bool | False |
| enable_context_aware? | bool | False |
| context_window? | int | 3 |
| confidence_threshold? | float | 0.7 |
| domain_whitelists? | dict[str, list[str]] \| None | None |
| log_profanity? | bool | False |
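These options compose freely. A minimal sketch combining custom words, an ignore list, and masking; the masked output in the comments is an assumption based on the option names, not verified library output:
from glin_profanity import Filter

moderation_filter = Filter({
    "custom_words": ["heck"],        # flag extra words as profane
    "ignore_words": ["damn"],        # never flag these
    "replace_with": "***",           # mask matches in processed_text
    "allow_obfuscated_match": True,  # catch variants like "h3ck"
    "fuzzy_tolerance_level": 0.8
})

result = moderation_filter.check_profanity("What the heck, damn it")
print(result["contains_profanity"])  # True ("heck" is custom-flagged)
print(result.get("processed_text"))  # assumed: "What the ***, damn it"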
Filter Methods
is_profane
def is_profane(self, value: str) -> bool:
Example:
from glin_profanity import Filter
filter_instance = Filter()
print(filter_instance.is_profane("hello world")) # False
print(filter_instance.is_profane("damn good")) # True
print(filter_instance.is_profane("sh*t happens")) # True (with obfuscation)isProfane(value: string): booleanExample:
import { Filter } from 'glin-profanity';
const filter = new Filter();
console.log(filter.isProfane("hello world")); // false
console.log(filter.isProfane("damn good")); // true
console.log(filter.isProfane("sh*t happens")); // true (with obfuscation)matches
def matches(self, word: str) -> bool:
Purpose: Alias for is_profane for API compatibility
Example:
filter_instance = Filter()
# Identical functionality to is_profane
print(filter_instance.matches("damn")) # True
print(filter_instance.is_profane("damn")) # Truematches(word: string): booleanPurpose: Alias for isProfane for API compatibility
Example:
const filter = new Filter();
// Identical functionality to isProfane
console.log(filter.matches("damn")); // true
console.log(filter.isProfane("damn")); // truecheck_profanity
def check_profanity(self, text: str) -> CheckProfanityResult:
Example:
from glin_profanity import Filter
filter_instance = Filter({
    "enable_context_aware": True,
    "severity_levels": True
})
result = filter_instance.check_profanity("This movie is fucking amazing!")
print(result["contains_profanity"]) # False (positive context)
print(result["context_score"]) # 0.85 (positive sentiment)
print(result["reason"]) # "Positive emotional context detected"checkProfanity(text: string): CheckProfanityResultExample:
import { Filter } from 'glin-profanity';
const filter = new Filter({
  enableContextAware: true,
  severityLevels: true
});
const result = filter.checkProfanity("This movie is fucking amazing!");
console.log(result.containsProfanity); // false (positive context)
console.log(result.contextScore); // 0.85 (positive sentiment)
console.log(result.reason); // "Positive emotional context detected"
check_profanity_with_min_severity
def check_profanity_with_min_severity(
    self, text: str, min_severity: SeverityLevel = SeverityLevel.EXACT
) -> dict[str, object]:
Example:
from glin_profanity import Filter, SeverityLevel
filter_instance = Filter({"severity_levels": True})
text = "What the hell is this f*ck?"
# Only include FUZZY matches (obfuscated)
fuzzy_only = filter_instance.check_profanity_with_min_severity(
    text, SeverityLevel.FUZZY
)
print(fuzzy_only["filtered_words"]) # ["f*ck"]
# Include all matches
all_matches = filter_instance.check_profanity_with_min_severity(
    text, SeverityLevel.EXACT
)
print(all_matches["filtered_words"]) # ["hell", "f*ck"]
checkProfanityWithMinSeverity(
  text: string,
  minSeverity: SeverityLevel = SeverityLevel.EXACT
): { filteredWords: string[]; result: CheckProfanityResult }
Example:
import { Filter, SeverityLevel } from 'glin-profanity';
const filter = new Filter({ severityLevels: true });
const text = "What the hell is this f*ck?";
// Only include FUZZY matches (obfuscated)
const fuzzyOnly = filter.checkProfanityWithMinSeverity(text, SeverityLevel.FUZZY);
console.log(fuzzyOnly.filteredWords); // ["f*ck"]
// Include all matches
const allMatches = filter.checkProfanityWithMinSeverity(text, SeverityLevel.EXACT);
console.log(allMatches.filteredWords); // ["hell", "f*ck"]
Type Definitions
CheckProfanityResult
| Prop | Type | Default |
|---|---|---|
| contains_profanity? | bool | N/A |
| profane_words? | list[str] | N/A |
| processed_text? | str \| None | None |
| severity_map? | dict[str, SeverityLevel] \| None | None |
| matches? | list[Match] \| None | None |
| context_score? | float \| None | None |
| reason? | str \| None | None |
Match
| Prop | Type | Default |
|---|---|---|
| word? | str | N/A |
| index? | int | N/A |
| severity? | SeverityLevel | N/A |
| context_score? | float \| None | None |
| reason? | str \| None | None |
| is_whitelisted? | bool \| None | None |
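These result and match fields can be consumed together. A short sketch, assuming severity_map and matches are populated when severity_levels is enabled and that Match entries support dict-style access like CheckProfanityResult:
from glin_profanity import Filter, SeverityLevel

filter_instance = Filter({"severity_levels": True})
result = filter_instance.check_profanity("What the hell is this f*ck?")

# severity_map maps each flagged word to its SeverityLevel
for word, severity in (result.get("severity_map") or {}).items():
    label = "exact" if severity == SeverityLevel.EXACT else "fuzzy"
    print(f"{word}: {label}")

# matches carries per-occurrence details (assumed dict-style access)
for match in result.get("matches") or []:
    print(match["word"], match["index"], match["severity"])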
SeverityLevel
class SeverityLevel(IntEnum):
    EXACT = 1  # Direct/exact matches
    FUZZY = 2  # Fuzzy/approximate matches
Language
Language = Literal[
    "arabic", "chinese", "czech", "danish", "english", "esperanto",
    "finnish", "french", "german", "hindi", "hungarian", "italian",
    "japanese", "korean", "norwegian", "persian", "polish",
    "portuguese", "russian", "spanish", "swedish", "thai", "turkish"
]
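To match against a specific subset, pass languages; to scan every supported dictionary, set all_languages instead. A minimal sketch (the flagged example is illustrative):
from glin_profanity import Filter

bilingual = Filter({"languages": ["english", "spanish"]})
global_filter = Filter({"all_languages": True})

print(bilingual.is_profane("mierda"))  # True (Spanish dictionary loaded)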
Naming Conventions
JavaScript ↔ Python Mapping
# JavaScript camelCase -> Python snake_case
js_config = {
    "enableContextAware": True,    # -> "enable_context_aware"
    "contextWindow": 5,            # -> "context_window"
    "confidenceThreshold": 0.8,    # -> "confidence_threshold"
    "allowObfuscatedMatch": True,  # -> "allow_obfuscated_match"
    "fuzzyToleranceLevel": 0.7,    # -> "fuzzy_tolerance_level"
    "domainWhitelists": {},        # -> "domain_whitelists"
    "logProfanity": False,         # -> "log_profanity"
    "severityLevels": True         # -> "severity_levels"
}
python_config = {
    "enable_context_aware": True,
    "context_window": 5,
    "confidence_threshold": 0.8,
    "allow_obfuscated_match": True,
    "fuzzy_tolerance_level": 0.7,
    "domain_whitelists": {},
    "log_profanity": False,
    "severity_levels": True
}
# JavaScript camelCase -> Python snake_case
js_result = {
    "containsProfanity": True,  # -> "contains_profanity"
    "profaneWords": ["damn"],   # -> "profane_words"
    "processedText": "...",     # -> "processed_text"
    "severityMap": {},          # -> "severity_map"
    "contextScore": 0.5,        # -> "context_score"
    "isWhitelisted": False      # -> "is_whitelisted"
}
python_result = {
    "contains_profanity": True,
    "profane_words": ["damn"],
    "processed_text": "...",
    "severity_map": {},
    "context_score": 0.5,
    "is_whitelisted": False
}
# JavaScript camelCase -> Python snake_case
# JavaScript Filter class
filter.isProfane("text") # -> is_profane("text")
filter.checkProfanity("text") # -> check_profanity("text")
filter.checkProfanityWithMinSeverity("text") # -> check_profanity_with_min_severity("text")
# Python Filter class
filter_instance.is_profane("text")
filter_instance.check_profanity("text")
filter_instance.check_profanity_with_min_severity("text")Both APIs provide identical functionality and behavior - only the naming conventions differ to match language-specific standards (camelCase for JavaScript, snake_case for Python).
Framework Integration
Django Model Integration
from django.db import models
from django.core.exceptions import ValidationError
from glin_profanity import Filter, SeverityLevel
class Comment(models.Model):
    content = models.TextField()
    is_approved = models.BooleanField(default=False)

    def clean(self):
        # Validate content for profanity
        filter_instance = Filter({
            "enable_context_aware": True,
            "confidence_threshold": 0.8,
            "severity_levels": True
        })
        result = filter_instance.check_profanity(self.content)
        if result["contains_profanity"]:
            # Get severe words only
            severe_result = filter_instance.check_profanity_with_min_severity(
                self.content, SeverityLevel.FUZZY
            )
            if severe_result["filtered_words"]:
                raise ValidationError(
                    f"Content contains inappropriate language: "
                    f"{', '.join(severe_result['filtered_words'])}"
                )
        # Auto-approve if context is positive
        if result.get("context_score", 0) > 0.7:
            self.is_approved = True

    def save(self, *args, **kwargs):
        self.clean()
        super().save(*args, **kwargs)
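Calling save() on the model above routes every write through the profanity check. A short usage sketch; the error handling shown is illustrative:
comment = Comment(content="This damn form is f*cking broken")
try:
    comment.save()  # save() invokes clean(), which may raise
except ValidationError as exc:
    # Surface the filter's message as a form or API error
    print(exc.messages)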
Flask API Integration
from flask import Flask, request, jsonify
from glin_profanity import Filter

app = Flask(__name__)

# Initialize filter with optimal configuration
content_filter = Filter({
    "enable_context_aware": True,
    "languages": ["english", "spanish"],
    "severity_levels": True,
    "domain_whitelists": {
        "english": ["boss", "enemy", "game", "character"]
    }
})

@app.route('/api/moderate', methods=['POST'])
def moderate_content():
    data = request.json
    text = data.get('text', '')
    if not text:
        return jsonify({"error": "Text is required"}), 400
    try:
        result = content_filter.check_profanity(text)
        return jsonify({
            "text": text,
            "contains_profanity": result["contains_profanity"],
            "profane_words": result["profane_words"],
            "processed_text": result.get("processed_text"),
            "context_score": result.get("context_score"),
            "reason": result.get("reason"),
            "severity_map": result.get("severity_map", {})
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    app.run(debug=True)
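The endpoint can be exercised with any HTTP client. A quick sketch using the requests library, assuming the development server is running on localhost:5000:
import requests

resp = requests.post(
    "http://localhost:5000/api/moderate",
    json={"text": "This movie is fucking amazing!"},
)
payload = resp.json()
print(payload["contains_profanity"])  # False when positive context is detected
print(payload.get("context_score"))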
Cross-References
- Core Functions - JavaScript equivalent functions
- Filter Class - JavaScript Filter class documentation
- React Hook - React integration patterns
- Quick Start - Basic Python usage examples
- Installation - Python package installation guide