Server Integration
Server-side usage patterns for Node.js, Next.js, and Python Flask applications
Complete server-side integration examples for popular backend frameworks. These patterns enable content moderation at the API level, providing secure and scalable profanity filtering for web applications.
Server-side filtering is more secure than client-side validation because it cannot be bypassed by a modified client or a direct API call. The examples below demonstrate production-ready patterns with proper error handling and performance optimization.
Node.js Express Middleware
Reusable Express middleware for automatic content moderation across multiple routes:
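// middleware/profanityFilter.js (the require path used in the usage example below)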
const { checkProfanity } = require('glin-profanity');
// Basic profanity filtering middleware
const profanityFilter = (config = {}) => {
const defaultConfig = {
languages: ['english'],
enableContextAware: true,
autoReplace: false,
severityFilter: 'MODERATE'
};
const filterConfig = { ...defaultConfig, ...config };
return (req, res, next) => {
// Check request body for text fields
const textFields = ['content', 'message', 'comment', 'title', 'description'];
for (const field of textFields) {
if (req.body[field]) {
const result = checkProfanity(req.body[field], filterConfig);
if (result.containsProfanity) {
return res.status(400).json({
error: 'Content contains inappropriate language',
field: field,
flaggedWords: result.profaneWords,
severity: result.maxSeverity
});
}
}
}
next();
};
};
module.exports = profanityFilter;

// middleware/advancedProfanityFilter.js (the require path used in the usage example below)
const { Filter } = require('glin-profanity');
// Advanced middleware with caching and custom actions
class ProfanityMiddleware {
constructor(config = {}) {
this.filter = new Filter({
languages: ['english', 'spanish'],
enableContextAware: true,
confidenceThreshold: 0.7,
severityFilter: 'MODERATE',
enableCaching: true,
...config
});
this.cache = new Map();
this.maxCacheSize = 1000;
}
// Get cached result or perform new check
checkWithCache(text) {
if (this.cache.has(text)) {
return this.cache.get(text);
}
const result = this.filter.checkProfanity(text);
    // Evict the oldest entry when full (a Map iterates in insertion order)
if (this.cache.size >= this.maxCacheSize) {
const firstKey = this.cache.keys().next().value;
this.cache.delete(firstKey);
}
this.cache.set(text, result);
return result;
}
// Middleware function
middleware() {
return (req, res, next) => {
const textFields = this.extractTextFields(req.body);
const violations = [];
for (const [field, text] of textFields) {
const result = this.checkWithCache(text);
if (result.containsProfanity) {
violations.push({
field,
flaggedWords: result.profaneWords,
severity: result.maxSeverity,
confidence: result.confidence
});
}
}
if (violations.length > 0) {
return this.handleViolations(req, res, violations);
}
next();
};
}
extractTextFields(body) {
const fields = [];
const textFieldNames = ['content', 'message', 'comment', 'title', 'description'];
for (const field of textFieldNames) {
if (body[field] && typeof body[field] === 'string') {
fields.push([field, body[field]]);
}
}
return fields;
}
handleViolations(req, res, violations) {
// Custom handling based on severity
const hasSevere = violations.some(v => v.severity === 'SEVERE');
if (hasSevere) {
return res.status(403).json({
error: 'Content violates community guidelines',
violations,
action: 'blocked'
});
}
return res.status(400).json({
error: 'Content requires review',
violations,
action: 'review_required'
});
}
}
module.exports = ProfanityMiddleware;

// Example app wiring both middlewares together
const express = require('express');
const profanityFilter = require('./middleware/profanityFilter');
const ProfanityMiddleware = require('./middleware/advancedProfanityFilter');
const app = express();
app.use(express.json());
// Basic usage - apply to all routes
app.use(profanityFilter({
languages: ['english'],
severityFilter: 'MODERATE'
}));
// Advanced usage - specific routes
const advancedFilter = new ProfanityMiddleware({
languages: ['english', 'spanish'],
enableContextAware: true
});
// Comments endpoint with strict filtering
app.post('/api/comments', advancedFilter.middleware(), (req, res) => {
// Content is already validated by middleware
res.json({ message: 'Comment posted successfully' });
});
// User profiles with lenient filtering
app.put('/api/profile', profanityFilter({
severityFilter: 'SEVERE',
enableContextAware: true
}), (req, res) => {
res.json({ message: 'Profile updated' });
});
app.listen(3000, () => {
console.log('Server running on port 3000');
});

Key Features:
- ✅ Automatic field detection for content, message, comment, title, description
- ✅ Caching system for improved performance with repeated content
- ✅ Severity-based responses with different HTTP status codes
- ✅ Configurable per route for different content types
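Note that both middlewares above scan only a fixed list of top-level body fields. If your payloads nest user text more deeply (for example req.body.post.content), a recursive walk can collect those strings first. The sketch below is illustrative only; the field names and depth limit are assumptions, not part of glin-profanity:

// Sketch: collect nested [fieldPath, text] pairs for moderation (illustrative).
function collectTextFields(
  obj: unknown,
  path: string[] = [],
  maxDepth = 3
): Array<[string, string]> {
  if (!obj || typeof obj !== 'object' || path.length >= maxDepth) return [];
  const names = ['content', 'message', 'comment', 'title', 'description'];
  const found: Array<[string, string]> = [];
  for (const [key, value] of Object.entries(obj)) {
    if (typeof value === 'string' && names.includes(key)) {
      found.push([[...path, key].join('.'), value]);
    } else if (value && typeof value === 'object') {
      // Recurse into nested objects and arrays, tracking the dotted path
      found.push(...collectTextFields(value, [...path, key], maxDepth));
    }
  }
  return found;
}

The returned pairs can then be run through checkProfanity exactly like the flat fields in the middlewares above.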
Next.js API Routes (SSR)
Server-side rendering and API route integration for Next.js applications:
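// pages/api/moderate.ts (Pages Router API route)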
import type { NextApiRequest, NextApiResponse } from 'next';
import { checkProfanity } from 'glin-profanity';
type ModerationRequest = {
content: string;
contentType?: 'comment' | 'post' | 'message';
userId?: string;
};
type ModerationResponse = {
approved: boolean;
content?: string;
flaggedWords?: string[];
severity?: string;
action: 'approve' | 'reject' | 'review';
reason?: string;
};
export default async function handler(
req: NextApiRequest,
res: NextApiResponse<ModerationResponse | { error: string }>
) {
if (req.method !== 'POST') {
return res.status(405).json({ error: 'Method not allowed' });
}
const { content, contentType = 'comment', userId }: ModerationRequest = req.body;
if (!content || typeof content !== 'string') {
return res.status(400).json({ error: 'Content is required' });
}
try {
// Different configs for different content types
const configs = {
comment: {
languages: ['english'],
enableContextAware: true,
severityFilter: 'MODERATE'
},
post: {
languages: ['english', 'spanish'],
enableContextAware: true,
severityFilter: 'MILD'
},
message: {
languages: ['english'],
enableContextAware: true,
severityFilter: 'SEVERE',
autoReplace: true
}
};
const config = configs[contentType] || configs.comment;
const result = checkProfanity(content, config);
if (result.containsProfanity) {
const severity = result.maxSeverity;
// Auto-approve mild profanity in positive context
if (severity === 'MILD' && result.contextScore > 0.7) {
return res.status(200).json({
approved: true,
content: result.processedText || content,
action: 'approve',
reason: 'Positive context detected'
});
}
// Reject severe profanity
if (severity === 'SEVERE') {
return res.status(200).json({
approved: false,
flaggedWords: result.profaneWords,
severity,
action: 'reject',
reason: 'Severe profanity detected'
});
}
// Moderate profanity requires review
return res.status(200).json({
approved: false,
flaggedWords: result.profaneWords,
severity,
action: 'review',
reason: 'Content flagged for manual review'
});
}
// Content is clean
res.status(200).json({
approved: true,
content,
action: 'approve'
});
} catch (error) {
console.error('Moderation error:', error);
res.status(500).json({ error: 'Internal server error' });
}
}

// middleware.ts: apply moderation automatically to matching API routes
import { NextRequest, NextResponse } from 'next/server';
import { checkProfanity } from 'glin-profanity';
export async function middleware(request: NextRequest) {
// Only check POST requests to API routes
if (request.method !== 'POST' || !request.nextUrl.pathname.startsWith('/api/')) {
return NextResponse.next();
}
// Skip moderation for certain endpoints
const skipPaths = ['/api/auth', '/api/health', '/api/moderate'];
if (skipPaths.some(path => request.nextUrl.pathname.startsWith(path))) {
return NextResponse.next();
}
try {
const body = await request.json();
const textFields = ['content', 'message', 'comment', 'title'];
for (const field of textFields) {
if (body[field]) {
const result = checkProfanity(body[field], {
languages: ['english'],
enableContextAware: true,
severityFilter: 'MODERATE'
});
if (result.containsProfanity) {
return NextResponse.json(
{
error: 'Content contains inappropriate language',
field,
flaggedWords: result.profaneWords
},
{ status: 400 }
);
}
}
}
    // NextResponse.next() lets the original request (body included) continue
    // to the matched route handler, so there is no need to re-create it here.
    return NextResponse.next();
} catch (error) {
console.error('Middleware error:', error);
return NextResponse.next();
}
}
export const config = {
matcher: '/api/:path*'
};

// SSR: pre-moderate comments inside getServerSideProps
import { GetServerSideProps } from 'next';
import { checkProfanity } from 'glin-profanity';
interface Comment {
id: string;
content: string;
author: string;
flagged?: boolean;
severity?: string;
}
interface CommentsPageProps {
comments: Comment[];
postId: string;
}
export default function CommentsPage({ comments, postId }: CommentsPageProps) {
return (
<div>
<h1>Comments for Post {postId}</h1>
{comments.map(comment => (
<div key={comment.id} className={comment.flagged ? 'flagged' : ''}>
<p>{comment.content}</p>
<small>By {comment.author}</small>
{comment.flagged && (
<span className="warning">
Content flagged ({comment.severity})
</span>
)}
</div>
))}
</div>
);
}
export const getServerSideProps: GetServerSideProps = async (context) => {
const { id } = context.params!;
// Fetch comments from database
const rawComments = await fetchCommentsFromDB(id as string);
// Server-side content moderation
const moderatedComments = await Promise.all(
rawComments.map(async (comment) => {
const result = checkProfanity(comment.content, {
languages: ['english'],
enableContextAware: true,
severityFilter: 'MILD'
});
return {
...comment,
flagged: result.containsProfanity,
severity: result.maxSeverity,
// Optionally replace content on server-side
content: result.containsProfanity && result.processedText
? result.processedText
: comment.content
};
})
);
return {
props: {
comments: moderatedComments,
postId: id as string
}
};
};
// Mock database function
async function fetchCommentsFromDB(postId: string): Promise<Comment[]> {
// Implementation would fetch from your database
return [];
}

Key Features:
- ✅ TypeScript support with proper type definitions
- ✅ Content-type specific configs for different moderation levels
- ✅ Server-side pre-filtering during SSR for better performance
- ✅ Middleware integration for automatic API protection
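For completeness, here is one way client or server code might call the moderation route above; a minimal sketch assuming the ModerationRequest and ModerationResponse shapes defined in the handler:

// Sketch: calling the moderation API route shown above.
async function moderateContent(content: string): Promise<boolean> {
  const res = await fetch('/api/moderate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ content, contentType: 'comment' }),
  });
  if (!res.ok) throw new Error(`Moderation request failed: ${res.status}`);
  const data = await res.json(); // ModerationResponse
  return data.action === 'approve';
}

Treat a client-side call like this as a UX convenience only; as noted above, the server-side check remains the source of truth.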
Python Flask Endpoint
Flask application integration with request validation and response formatting:
from flask import Flask, request, jsonify
from glin_profanity import Filter
import logging
app = Flask(__name__)
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Initialize profanity filter
profanity_filter = Filter({
"languages": ["english"],
"enable_context_aware": True,
"severity_filter": "MODERATE",
"auto_replace": False
})
@app.route('/api/moderate', methods=['POST'])
def moderate_content():
try:
data = request.get_json()
if not data or 'content' not in data:
return jsonify({"error": "Content is required"}), 400
content = data['content']
content_type = data.get('content_type', 'comment')
# Check for profanity
result = profanity_filter.check_profanity(content)
if result["contains_profanity"]:
logger.warning(f"Profanity detected in {content_type}: {result['profane_words']}")
return jsonify({
"approved": False,
"flagged_words": result["profane_words"],
"severity": result.get("max_severity"),
"action": "review_required",
"reason": "Content contains inappropriate language"
}), 200
# Content is clean
return jsonify({
"approved": True,
"content": content,
"action": "approve"
}), 200
except Exception as e:
logger.error(f"Moderation error: {str(e)}")
return jsonify({"error": "Internal server error"}), 500
@app.route('/api/comments', methods=['POST'])
def create_comment():
try:
data = request.get_json()
if not data or 'comment' not in data:
return jsonify({"error": "Comment content is required"}), 400
# Moderate comment content
result = profanity_filter.check_profanity(data['comment'])
if result["contains_profanity"]:
# Auto-reject severe profanity
if result.get("max_severity") == "SEVERE":
return jsonify({
"error": "Comment violates community guidelines",
"action": "rejected"
}), 403
# Queue moderate profanity for review
return jsonify({
"message": "Comment submitted for review",
"action": "pending_review",
"comment_id": None # Would be generated after review
}), 202
# Save clean comment to database
comment_id = save_comment_to_db(data['comment'], data.get('author'))
return jsonify({
"message": "Comment posted successfully",
"comment_id": comment_id,
"action": "approved"
}), 201
except Exception as e:
logger.error(f"Comment creation error: {str(e)}")
return jsonify({"error": "Failed to create comment"}), 500
def save_comment_to_db(content, author):
# Mock database save
import uuid
return str(uuid.uuid4())
if __name__ == '__main__':
    app.run(debug=True)  # debug=True is for development only

# Advanced pattern: a blueprint with a caching moderation service
from flask import Blueprint, request, jsonify, current_app
from glin_profanity import Filter
from functools import wraps
import time
import hashlib
moderation_bp = Blueprint('moderation', __name__)
class ModerationService:
def __init__(self):
self.filter = Filter({
"languages": ["english", "spanish"],
"enable_context_aware": True,
"confidence_threshold": 0.7,
"severity_filter": "MODERATE",
"enable_caching": True
})
self.cache = {}
self.cache_ttl = 300 # 5 minutes
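        # Note: this dict is per Python process; with multiple workers (e.g. Gunicorn)
        # each process keeps its own cache, so consider Redis for a shared one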
def get_cache_key(self, content):
        return hashlib.md5(content.encode()).hexdigest()  # non-cryptographic use: cache key only
def check_content(self, content, content_type="general"):
# Check cache first
cache_key = self.get_cache_key(content)
now = time.time()
if cache_key in self.cache:
cached_result, timestamp = self.cache[cache_key]
if now - timestamp < self.cache_ttl:
return cached_result
else:
del self.cache[cache_key]
# Perform moderation
result = self.filter.check_profanity(content)
# Add to cache
self.cache[cache_key] = (result, now)
# Clean old cache entries (simple cleanup)
if len(self.cache) > 1000:
old_keys = [k for k, (_, ts) in self.cache.items()
if now - ts > self.cache_ttl]
for k in old_keys:
del self.cache[k]
return result
def format_response(self, result, content):
if not result["contains_profanity"]:
return {
"approved": True,
"content": content,
"action": "approve"
}
severity = result.get("max_severity", "UNKNOWN")
response = {
"approved": False,
"flagged_words": result["profane_words"],
"severity": severity,
"confidence": result.get("confidence"),
"context_score": result.get("context_score")
}
# Determine action based on severity and context
if severity == "SEVERE":
response.update({
"action": "reject",
"reason": "Severe profanity detected"
})
elif result.get("context_score", 0) > 0.8:
response.update({
"action": "approve_with_warning",
"reason": "Positive context detected"
})
else:
response.update({
"action": "review_required",
"reason": "Manual review needed"
})
return response
# Initialize service
moderation_service = ModerationService()
def require_content_moderation(f):
@wraps(f)
    def decorated_function(*args, **kwargs):
        data = request.get_json(silent=True)
        if not data:
            return f(*args, **kwargs)
# Extract text fields for moderation
text_fields = ['content', 'message', 'comment', 'title', 'description']
violations = []
for field in text_fields:
if field in data and isinstance(data[field], str):
result = moderation_service.check_content(data[field], field)
if result["contains_profanity"]:
violations.append({
"field": field,
"response": moderation_service.format_response(result, data[field])
})
if violations:
            # Store violations for the route to inspect (flask.g is a more idiomatic
            # home for request-scoped data, but this keeps the example short)
            request.moderation_violations = violations
# Reject if any severe violations
severe_violations = [v for v in violations
if v["response"]["severity"] == "SEVERE"]
if severe_violations:
return jsonify({
"error": "Content violates community guidelines",
"violations": severe_violations
}), 403
return f(*args, **kwargs)
return decorated_function
@moderation_bp.route('/moderate', methods=['POST'])
def moderate():
data = request.get_json()
if not data or 'content' not in data:
return jsonify({"error": "Content is required"}), 400
result = moderation_service.check_content(
data['content'],
data.get('content_type', 'general')
)
response = moderation_service.format_response(result, data['content'])
return jsonify(response)
@moderation_bp.route('/comments', methods=['POST'])
@require_content_moderation
def create_comment():
data = request.get_json()
# Check if moderation found violations
if hasattr(request, 'moderation_violations'):
violations = request.moderation_violations
review_required = any(v["response"]["action"] == "review_required"
for v in violations)
if review_required:
return jsonify({
"message": "Comment submitted for review",
"status": "pending_review",
"violations": violations
}), 202
# Process clean content
comment_id = save_comment(data['comment'], data.get('author'))
return jsonify({
"message": "Comment created successfully",
"comment_id": comment_id
}), 201
def save_comment(content, author):
# Mock save function
import uuid
    return str(uuid.uuid4())

# decorators/profanity_check.py (matches the import path in the usage example below)
from functools import wraps
from flask import request, jsonify, current_app
from glin_profanity import Filter
# Global filter instance
_filter_instance = None
def get_filter():
global _filter_instance
if _filter_instance is None:
_filter_instance = Filter({
"languages": ["english"],
"enable_context_aware": True,
"severity_filter": "MODERATE"
})
return _filter_instance
def profanity_check(fields=None, severity="MODERATE", action="reject"):
"""
Decorator for automatic profanity checking on Flask routes
Args:
fields: List of field names to check (default: ['content', 'message', 'comment'])
severity: Minimum severity to trigger action ("MILD", "MODERATE", "SEVERE")
action: Action to take ("reject", "warn", "log")
"""
if fields is None:
fields = ['content', 'message', 'comment']
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if request.method not in ['POST', 'PUT', 'PATCH']:
return f(*args, **kwargs)
            data = request.get_json(silent=True)  # tolerate missing or invalid JSON
if not data:
return f(*args, **kwargs)
filter_instance = get_filter()
violations = []
for field in fields:
if field in data and isinstance(data[field], str):
result = filter_instance.check_profanity(data[field])
if result["contains_profanity"]:
max_severity = result.get("max_severity", "UNKNOWN")
# Check if severity meets threshold
severity_levels = {"MILD": 1, "MODERATE": 2, "SEVERE": 3}
if severity_levels.get(max_severity, 0) >= severity_levels.get(severity, 2):
violations.append({
"field": field,
"words": result["profane_words"],
"severity": max_severity,
"confidence": result.get("confidence")
})
if violations:
current_app.logger.warning(f"Profanity detected: {violations}")
if action == "reject":
return jsonify({
"error": "Content contains inappropriate language",
"violations": violations
}), 400
elif action == "warn":
# Add violations to request context for the route to handle
request.profanity_violations = violations
return f(*args, **kwargs)
return decorated_function
return decorator
# Usage examples:
# @profanity_check() # Default: check common fields, reject on MODERATE+
# @profanity_check(fields=['title', 'body'], severity="SEVERE", action="warn")
# @profanity_check(fields=['comment'], severity="MILD", action="log")

# Example blueprint using the decorator
from flask import Blueprint, request, jsonify
from decorators.profanity_check import profanity_check
posts_bp = Blueprint('posts', __name__)
@posts_bp.route('/posts', methods=['POST'])
@profanity_check(fields=['title', 'content'], severity="MODERATE")
def create_post():
data = request.get_json()
# Content is already validated by decorator
post_id = save_post(data['title'], data['content'], data.get('author'))
return jsonify({
"message": "Post created successfully",
"post_id": post_id
}), 201
@posts_bp.route('/posts/<post_id>/comments', methods=['POST'])
@profanity_check(fields=['comment'], severity="MILD", action="warn")
def add_comment(post_id):
data = request.get_json()
# Check for warnings from decorator
if hasattr(request, 'profanity_violations'):
# Handle warnings - maybe flag for review
return jsonify({
"message": "Comment flagged for review",
"status": "pending",
"violations": request.profanity_violations
}), 202
comment_id = save_comment(post_id, data['comment'], data.get('author'))
return jsonify({
"message": "Comment added successfully",
"comment_id": comment_id
}), 201
def save_post(title, content, author):
# Mock save
import uuid
return str(uuid.uuid4())
def save_comment(post_id, content, author):
# Mock save
import uuid
    return str(uuid.uuid4())

Key Features:
- ✅ Caching system with TTL for improved performance
- ✅ Decorator pattern for easy route protection
- ✅ Blueprint organization for modular applications
- ✅ Severity-based actions with configurable thresholds
Performance Considerations
Server-side filtering adds latency to API requests. Consider these optimization strategies for production applications.
Optimization Strategies
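// Result caching with Redis (Node.js)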
const redis = require('redis');
const crypto = require('crypto');
const { checkProfanity } = require('glin-profanity');
const client = redis.createClient();
client.connect().catch(console.error); // node-redis v4 requires an explicit connect()
const CACHE_TTL = 300; // 5 minutes
async function checkWithCache(content, config) {
  // Hash the content: raw text can be arbitrarily long and makes a poor Redis key
  const cacheKey = `profanity:${crypto.createHash('sha256').update(content).digest('hex')}`;
try {
// Check cache first
const cached = await client.get(cacheKey);
if (cached) {
return JSON.parse(cached);
}
// Perform check
const result = checkProfanity(content, config);
// Cache result
    await client.setEx(cacheKey, CACHE_TTL, JSON.stringify(result)); // camelCase setEx in node-redis v4
return result;
} catch (error) {
// Fallback to direct check if cache fails
return checkProfanity(content, config);
}
}

# Offload moderation to a Celery worker
from celery import Celery
from glin_profanity import Filter
celery_app = Celery('moderation', broker='redis://localhost:6379')
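# Note: a result backend (backend='redis://...') is only needed if callers read
# task return values; .delay() works fire-and-forget with just a broker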
@celery_app.task
def moderate_content_async(content_id, content, config):
"""Async content moderation task"""
filter_instance = Filter(config)
result = filter_instance.check_profanity(content)
    # Update database with moderation result (app-specific helper, not shown)
    update_content_moderation_status(content_id, result)
    # Send notification if flagged (app-specific helper, not shown)
    if result["contains_profanity"]:
        notify_moderators(content_id, result)
return result
# Usage in a Flask route (app setup, imports, and save_post_pending omitted for brevity)
@app.route('/api/posts', methods=['POST'])
def create_post():
data = request.get_json()
# Save post as "pending"
post_id = save_post_pending(data['content'], data['author'])
# Queue for async moderation
moderate_content_async.delay(
post_id,
data['content'],
{"languages": ["english"], "severity_filter": "MODERATE"}
)
return jsonify({
"message": "Post submitted for review",
"post_id": post_id,
"status": "pending"
    }), 202

# nginx: load-balance and cache the moderation service
upstream moderation_service {
server 127.0.0.1:3001;
server 127.0.0.1:3002;
server 127.0.0.1:3003;
}
server {
listen 80;
location /api/moderate {
proxy_pass http://moderation_service;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
        # Cache successful moderation responses; caching POST requests needs an
        # explicit zone (proxy_cache_path in the http block) and a method opt-in
        proxy_cache moderation_cache;
        proxy_cache_methods POST;
        proxy_cache_valid 200 5m;
        proxy_cache_key "$uri$request_body";
}
location / {
        proxy_pass http://main_app;  # primary application upstream (defined elsewhere)
}
}

What's Next?
🔧 Core Functions
Complete API reference for checkProfanity and related methods
⚙️ Configuration Presets
Ready-to-use configurations for different server environments
🧠 Context-Aware Filtering
Advanced sentiment analysis for better accuracy
⚖️ Severity Levels
Understanding EXACT vs FUZZY classification for API responses
Pro Tip: Start with Express middleware or Flask decorators for quick integration, then optimize with caching and async processing based on your application's traffic patterns.