Complete guide to integrating Blindfold with your AI application
This guide walks you through integrating Blindfold with an AI application from start to finish. By the end, you’ll have a working chatbot that protects user PII before sending data to OpenAI. Estimated time: 15–20 minutes.
Local mode is free forever. All SDKs include local mode with 86 regex-based entity types and all 8 operations — no API key, no signup, no network calls, no data leaves your infrastructure. Just install the SDK and use Blindfold() with no arguments. You only need the Cloud API below if you want NLP-powered detection (names, addresses, organizations) and compliance policies.
import os

from blindfold import Blindfold
from dotenv import load_dotenv
from openai import OpenAI

# Pull BLINDFOLD_API_KEY / OPENAI_API_KEY from a local .env file.
load_dotenv()

# Module-level clients shared by every call to chat_with_privacy().
blindfold = Blindfold(api_key=os.getenv("BLINDFOLD_API_KEY"))
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


def chat_with_privacy(user_message: str) -> str:
    """Answer *user_message* via OpenAI while keeping PII out of the request.

    Pipeline: tokenize the message with Blindfold's GDPR policy, send the
    masked text to the model, then restore the original values in the
    model's reply. Each step prints its progress; on failure the function
    returns an apology string instead of raising.
    """
    print(f"\n👤 User: {user_message}")

    # Step 1: replace detected PII with placeholder tokens before anything
    # leaves our infrastructure.
    try:
        masked = blindfold.tokenize(
            text=user_message,
            policy="gdpr_eu",  # GDPR-compliant detection
        )
        print(f"🔒 Protected: {masked.text}")
        print(f"🏷️ Detected {masked.entities_count} PII items")
    except Exception as e:
        print(f"❌ Error tokenizing: {e}")
        return "Sorry, I couldn't process your message securely."

    # Step 2: query the model with the masked text only.
    try:
        completion = openai_client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": masked.text},
            ],
        )
        model_reply = completion.choices[0].message.content
        print(f"🤖 AI (protected): {model_reply}")
    except Exception as e:
        print(f"❌ Error calling OpenAI: {e}")
        return "Sorry, I encountered an error with the AI service."

    # Step 3: swap the placeholder tokens back for the real values using
    # the mapping produced in step 1.
    try:
        restored = blindfold.detokenize(
            text=model_reply,
            mapping=masked.mapping,
        )
        print(f"✅ Final response: {restored.text}")
        return restored.text
    except Exception as e:
        print(f"❌ Error detokenizing: {e}")
        # Fall back to the still-masked reply rather than failing outright.
        return model_reply


# Example usage
if __name__ == "__main__":
    # Messages deliberately containing PII to exercise the whole pipeline.
    for message in (
        "My name is John Doe and my email is john@example.com",
        "I live at 123 Main Street, Boston, MA 02101",
        "My phone number is +1-555-123-4567",
    ):
        chat_with_privacy(message)
        print("-" * 80)
Run it:
python privacy_chatbot.py
Create privacy-chatbot.js:
import { Blindfold } from '@blindfold/sdk';
import OpenAI from 'openai';
import dotenv from 'dotenv';

// Load environment variables
dotenv.config();

// Initialize clients
const blindfold = new Blindfold({
  apiKey: process.env.BLINDFOLD_API_KEY
});
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});

/**
 * Answer userMessage via OpenAI while keeping PII out of the request.
 * Pipeline: Blindfold tokenize (GDPR policy) -> chat completion on the
 * masked text -> detokenize the reply. Returns an apology string when a
 * step fails instead of throwing.
 */
async function chatWithPrivacy(userMessage) {
  console.log(`\n👤 User: ${userMessage}`);

  // Step 1: Tokenize sensitive data using GDPR policy.
  // FIX: the original declared `let protected;` — `protected` is a
  // reserved word in strict mode, and ES modules are always strict, so
  // the file failed to parse with a SyntaxError. Renamed to `masked`.
  let masked;
  try {
    masked = await blindfold.tokenize(userMessage, {
      policy: "gdpr_eu" // GDPR-compliant detection
    });
    console.log(`🔒 Protected: ${masked.text}`);
    console.log(`🏷️ Detected ${masked.entities_count} PII items`);
  } catch (error) {
    console.error(`❌ Error tokenizing: ${error.message}`);
    return "Sorry, I couldn't process your message securely.";
  }

  // Step 2: Send protected text to OpenAI — the model only ever sees
  // the placeholder tokens, never the raw PII.
  let aiResponse;
  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: masked.text }
      ]
    });
    aiResponse = completion.choices[0].message.content;
    console.log(`🤖 AI (protected): ${aiResponse}`);
  } catch (error) {
    console.error(`❌ Error calling OpenAI: ${error.message}`);
    return "Sorry, I encountered an error with the AI service.";
  }

  // Step 3: Restore original data in the response using the mapping
  // produced in step 1.
  try {
    const finalResponse = await blindfold.detokenize(
      aiResponse,
      masked.mapping
    );
    console.log(`✅ Final response: ${finalResponse.text}`);
    return finalResponse.text;
  } catch (error) {
    console.error(`❌ Error detokenizing: ${error.message}`);
    // If detokenization fails, return the still-protected response.
    return aiResponse;
  }
}

// Example usage: messages deliberately containing PII.
async function main() {
  const messages = [
    "My name is John Doe and my email is john@example.com",
    "I live at 123 Main Street, Boston, MA 02101",
    "My phone number is +1-555-123-4567"
  ];
  for (const message of messages) {
    await chatWithPrivacy(message);
    console.log("-".repeat(80));
  }
}

main().catch(console.error);
👤 User: My name is John Doe and my email is john@example.com🔒 Protected: My name is <person_1> and my email is <email_address_1>🏷️ Detected 2 PII items🤖 AI (protected): Hello <person_1>! I received your email at <email_address_1>.✅ Final response: Hello John Doe! I received your email at john@example.com.