Skip to content

Quickstart

Getting started with Guardrail ML

Note

Visit app.useguardrail.com to obtain your API key.

Colab Quickstart

Open In Colab

Quickly use our default Firewall

from guardrail import Firewall, OpenAI

# Wrap the OpenAI client with Guardrail's default firewall so every
# prompt/response pair is scanned automatically before and after the
# model call.
firewall = Firewall()
# Renamed from `openai` to `client`: the original name shadowed the
# `openai` package itself, which is confusing in example code.
client = OpenAI(firewall)

prompt = """Create a Python script to insert a new customer into our e-commerce database using SQLAlchemy. The customer's information is provided in a dictionary as follows:
customer_info = {
    "name": "Emily Johnson",
    "email": "emily@customercompany.com",
    "alt_email": "emily.j@customercompany.com",
    "phone_number": "617-555-5555",
    "shipping_address": "123 Brattle Street, Cambridge, USA",
    "loyalty_program_key": "eyJhbGciOiJIUzI1NiJ9.eyJSb2xlIjoiCustomerIiLCJJc3N1ZXIiOiJJc3N1ZXIiLCJVc2VybmFtZSI6IkVtaIwx5Sm9obiIsImV4cCI6MTY5NDc5NTc2NiwiaWF0IjoxNjk0Nzk1NzY2fQ.JTQzKodRvZN4dPIr0RcEeWSk3PU-DFDJzA0mVP26qpE"
}
Ensure the script inserts this customer's data into the database and handles exceptions gracefully."""

# The firewall scans the prompt (and the model's response) as part of
# this call; the prompt above deliberately contains PII and a secret
# token to demonstrate detection.
client.run_chat_completion(prompt)

Customizing your Firewall

from guardrail.firewall import Firewall 
from guardrail.firewall.vault import Vault
from guardrail.firewall.input_detectors import Anonymize, Secrets, PromptInjections, DoSTokens, StopInputSubstrings, MalwareInputURL, ToxicityInput
from guardrail.firewall.output_detectors import Deanonymize, SensitivePII, StopOutputSubstrings, FactualityDetector, Contradictions, MalwareOutputURL, ToxicityOutput

firewall = Firewall()

# The vault holds the values stripped by Anonymize so that Deanonymize
# can restore them in the model output. The original snippet imported
# Vault but never instantiated it, so `vault` below was a NameError.
vault = Vault()

# Fixed: the original list was missing a comma after ToxicityInput(),
# which made it a syntax error.
input_detectors = [Anonymize(vault), Secrets(), ToxicityInput(), PromptInjections(), DoSTokens(), StopInputSubstrings(), MalwareInputURL()]
output_detectors = [Deanonymize(vault), FactualityDetector(), ToxicityOutput(), SensitivePII(), StopOutputSubstrings(), Contradictions(), MalwareOutputURL()]

# Scan the user prompt; `prompt` is assumed to be defined earlier in
# your application (see the quickstart snippet above for an example).
sanitized_prompt, valid_results, risk_score = firewall.scan_input(prompt, input_detectors)

# Based on valid_results and risk_score's output - Use OpenAI, HuggingFace, Cohere, etc.

# `response_text` is the raw LLM response produced by whichever
# provider you called in the step above.
sanitized_response, valid_results, risk_score = firewall.scan_output(sanitized_prompt, response_text, output_detectors)

🦜️🔗 Using LangChain

from guardrail.firewall import Firewall 
from guardrail.tracker.langchain import GuardrailCallbackHandler

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Hook Guardrail into LangChain through a callback handler: every LLM
# call made by the chain is tracked by the firewall.
firewall = Firewall()
guarded_llm = OpenAI(callbacks=[GuardrailCallbackHandler(firewall)])

# A minimal chain-of-thought style template with one input variable.
template = """Question: {question}
Answer: Let's think step by step."""
qa_prompt = PromptTemplate(template=template, input_variables=["question"])

chain = LLMChain(prompt=qa_prompt, llm=guarded_llm)

question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
chain.run(question)