# Anthropic Educational Courses
This repository contains a comprehensive collection of educational courses designed to teach developers how to work with Anthropic's Claude AI models. The courses cover everything from basic API fundamentals to advanced topics like prompt engineering, evaluations, tool use, and real-world applications. The materials are structured as progressive learning paths with interactive Jupyter notebooks, hands-on exercises, and practical code examples. The courses favor Claude 3 Haiku, Anthropic's fastest and most cost-effective model, to minimize API costs for students, though learners can use any Claude model variant.
The curriculum is organized into five main courses: API fundamentals for SDK basics, an interactive prompt engineering tutorial for mastering prompting techniques, real-world prompting for complex production scenarios, prompt evaluations for quality measurement, and tool use for implementing function calling capabilities. Each course builds upon previous concepts and includes practical exercises with answer keys to reinforce learning.
## Anthropic Python SDK - Basic Setup and Authentication
Initialize the Anthropic client and authenticate with your API key to start making requests to Claude models.
```python
from anthropic import Anthropic
from dotenv import load_dotenv
import os
# Load environment variables from .env file
load_dotenv()
# Initialize client - automatically looks for ANTHROPIC_API_KEY environment variable
client = Anthropic()
# Alternative: explicitly pass API key
my_api_key = os.getenv("ANTHROPIC_API_KEY")
client = Anthropic(api_key=my_api_key)
# Make your first request
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
messages=[
{"role": "user", "content": "Hi there! Please write me a haiku about a pet chicken"}
]
)
print(response.content[0].text)
# Output:
# Feathered friend clucking,
# Scratching in the dirt all day,
# Loyal pet chicken.
```
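Authentication problems surface as exceptions at request time, so it can help to wrap calls defensively. A minimal sketch using exception classes exposed by the `anthropic` SDK:
```python
import anthropic

client = anthropic.Anthropic()
try:
    response = client.messages.create(
        model="claude-3-haiku-20240307",
        max_tokens=100,
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.content[0].text)
except anthropic.AuthenticationError:
    # Raised when ANTHROPIC_API_KEY is missing or invalid
    print("Authentication failed - check your ANTHROPIC_API_KEY.")
except anthropic.APIStatusError as e:
    # Any other non-success HTTP response from the API
    print(f"API error (status {e.status_code})")
```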
## Messages API - Creating Conversations with Claude
Send messages to Claude and receive responses using the messages.create() method with model selection, token limits, and conversation history.
```python
from anthropic import Anthropic
client = Anthropic()
# Single-turn conversation
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=500,
messages=[
{"role": "user", "content": "Multiply 1984135 by 9343116. Only respond with the result"}
]
)
print(response.content[0].text)
print(f"Stop reason: {response.stop_reason}")
print(f"Tokens used: {response.usage.output_tokens}")
# Multi-turn conversation with system prompt
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
system="You are a helpful foreign language tutor that always responds in French.",
messages=[
{"role": "user", "content": "Hey there, how are you?!"},
{"role": "assistant", "content": "Bonjour ! Comment allez-vous ?"},
{"role": "user", "content": "I'm doing well, thanks!"}
]
)
print(response.content[0].text)
```
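Because the Messages API is stateless, multi-turn conversations work by resending the accumulated history on every call. A minimal helper sketch (the `chat` function and prompts are illustrative, not part of the course code):
```python
from anthropic import Anthropic

client = Anthropic()
conversation = []  # we maintain history ourselves; the API keeps no state

def chat(user_text):
    conversation.append({"role": "user", "content": user_text})
    response = client.messages.create(
        model="claude-3-haiku-20240307",
        max_tokens=500,
        messages=conversation
    )
    assistant_text = response.content[0].text
    conversation.append({"role": "assistant", "content": assistant_text})
    return assistant_text

print(chat("Give me a fun fact about octopuses."))
print(chat("Tell me another one."))  # Claude sees the first exchange in the history
```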
## Model Parameters - Controlling Response Generation
Configure max_tokens, temperature, and stop_sequences to control how Claude generates responses.
```python
from anthropic import Anthropic
client = Anthropic()
# Using max_tokens to prevent truncation
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=10, # Very low - will truncate
messages=[{"role": "user", "content": "Write me a poem"}]
)
print(f"Truncated: {response.content[0].text}")
print(f"Stop reason: {response.stop_reason}") # Output: 'max_tokens'
# Using temperature for creativity control
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=100,
temperature=0, # Deterministic responses
messages=[{"role": "user", "content": "Come up with a name for an alien planet. Respond with a single word."}]
)
print(f"Low temp: {response.content[0].text}") # Likely same result each time
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=100,
temperature=1, # More creative/random
messages=[{"role": "user", "content": "Come up with a name for an alien planet. Respond with a single word."}]
)
print(f"High temp: {response.content[0].text}") # Varies each time
# Using stop_sequences for precise control
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=500,
messages=[{"role": "user", "content": "Generate a JSON object representing a person with a name, email, and phone number."}],
stop_sequences=["}"]
)
print(response.content[0].text + "}") # Add back the stop sequence
print(f"Stop sequence used: {response.stop_sequence}") # Output: '}'
```
## Tool Use - Simple Calculator Function
Enable Claude to call external tools by defining tool schemas and processing tool use requests.
```python
from anthropic import Anthropic
client = Anthropic()
# Define the actual function
def calculator(operation, operand1, operand2):
if operation == "add":
return operand1 + operand2
elif operation == "subtract":
return operand1 - operand2
elif operation == "multiply":
return operand1 * operand2
elif operation == "divide":
if operand2 == 0:
raise ValueError("Cannot divide by zero.")
return operand1 / operand2
else:
raise ValueError(f"Unsupported operation: {operation}")
# Define the tool schema
calculator_tool = {
"name": "calculator",
"description": "A simple calculator that performs basic arithmetic operations.",
"input_schema": {
"type": "object",
"properties": {
"operation": {
"type": "string",
"enum": ["add", "subtract", "multiply", "divide"],
"description": "The arithmetic operation to perform."
},
"operand1": {"type": "number", "description": "The first operand."},
"operand2": {"type": "number", "description": "The second operand."}
},
"required": ["operation", "operand1", "operand2"]
}
}
# Make request with tool
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=300,
system="You have access to tools, but only use them when necessary.",
messages=[{"role": "user", "content": "Multiply 1984135 by 9343116. Only respond with the result"}],
tools=[calculator_tool]
)
# Check if Claude wants to use the tool
if response.stop_reason == "tool_use":
tool_use = response.content[-1]
tool_name = tool_use.name
tool_input = tool_use.input
print(f"Claude wants to use: {tool_name}")
print(f"With inputs: {tool_input}")
# Execute the tool
result = calculator(
operation=tool_input["operation"],
operand1=tool_input["operand1"],
operand2=tool_input["operand2"]
)
print(f"Result: {result}")
# Output: 18538003464660
```
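The snippet above stops once the function runs locally. To let Claude compose a final answer, you send the result back in a `tool_result` content block. A minimal continuation sketch (it reuses `response`, `tool_use`, and `result` from the block above):
```python
# Send the tool result back so Claude can produce a final response
follow_up = client.messages.create(
    model="claude-3-haiku-20240307",
    max_tokens=300,
    system="You have access to tools, but only use them when necessary.",
    tools=[calculator_tool],
    messages=[
        {"role": "user", "content": "Multiply 1984135 by 9343116. Only respond with the result"},
        {"role": "assistant", "content": response.content},  # includes the tool_use block
        {
            "role": "user",
            "content": [{
                "type": "tool_result",
                "tool_use_id": tool_use.id,
                "content": str(result)
            }]
        }
    ]
)
print(follow_up.content[0].text)  # Claude's answer incorporating the calculation
```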
## Tool Use - Wikipedia Research Assistant
Build a research assistant that uses tool use to turn Claude's suggested article titles into links to real, verified Wikipedia articles saved in a reading list.
```python
from anthropic import Anthropic
import wikipedia
client = Anthropic()
def generate_wikipedia_reading_list(research_topic, article_titles):
wikipedia_articles = []
for t in article_titles:
results = wikipedia.search(t)
try:
page = wikipedia.page(results[0])
title = page.title
url = page.url
wikipedia_articles.append({"title": title, "url": url})
        except Exception:
            # Skip titles with no usable search result or page
            continue
add_to_research_reading_file(wikipedia_articles, research_topic)
def add_to_research_reading_file(articles, topic):
with open("research_reading.md", "a", encoding="utf-8") as file:
file.write(f"## {topic} \n")
for article in articles:
title = article["title"]
url = article["url"]
file.write(f"* [{title}]({url}) \n")
file.write(f"\n\n")
# Define tool schema
wikipedia_tool = {
"name": "generate_wikipedia_reading_list",
"description": "Searches Wikipedia for article titles and saves valid article URLs to a markdown file for later reading.",
"input_schema": {
"type": "object",
"properties": {
"research_topic": {
"type": "string",
"description": "The overall research topic"
},
"article_titles": {
"type": "array",
"items": {"type": "string"},
"description": "List of potential Wikipedia article titles to search for"
}
},
"required": ["research_topic", "article_titles"]
}
}
def get_research_help(topic, num_articles=3):
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
messages=[{
"role": "user",
"content": f"I need help researching '{topic}'. Please generate {num_articles} relevant Wikipedia article titles that I should read."
}],
tools=[wikipedia_tool]
)
if response.stop_reason == "tool_use":
tool_use = response.content[-1]
tool_input = tool_use.input
generate_wikipedia_reading_list(
research_topic=tool_input["research_topic"],
article_titles=tool_input["article_titles"]
)
print(f"Generated reading list for: {topic}")
# Usage
get_research_help("Pirates Across The World", 7)
get_research_help("History of Hawaii", 3)
# Check research_reading.md for results
```
## Prompt Engineering - Clear and Direct Instructions
Structure prompts with clear instructions, proper formatting, and XML tags to separate data from instructions.
```python
from anthropic import Anthropic
client = Anthropic()
# Basic clear prompt
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=500,
messages=[{
"role": "user",
"content": "Write a haiku about the ocean. Use simple language appropriate for children."
}]
)
print(response.content[0].text)
# Using XML tags to separate data from instructions
customer_email = """
I ordered product #12345 two weeks ago and it still hasn't arrived.
This is completely unacceptable!
"""
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=500,
messages=[{
"role": "user",
"content": f"""You will be provided with a customer email. Your task is to classify the sentiment as positive, neutral, or negative.
{customer_email}
Please respond with a single word: positive, neutral, or negative."""
}]
)
print(response.content[0].text) # Output: negative
```
## Prompt Engineering - Role Prompting and Chain of Thought
Use role prompting and prefilled responses to guide Claude's behavior and reasoning process.
```python
from anthropic import Anthropic
client = Anthropic()
# Role prompting with system message
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
system="You are a skeptical scientist who always questions claims and asks for evidence.",
messages=[{
"role": "user",
"content": "A new supplement claims to cure all diseases. What do you think?"
}]
)
print(response.content[0].text)
# Chain of thought with prefilled assistant response
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
messages=[
{
"role": "user",
"content": "I had 23 chickens but 2 flew away. How many are left?"
},
{
"role": "assistant",
"content": "Let me think through this step by step:\n1."
}
]
)
print(response.content[0].text)
# Output continues the numbered reasoning process
```
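A related technique emphasized in the courses is few-shot prompting, where worked examples in the prompt show Claude the exact output format you want. A minimal sketch (the review texts are illustrative):
```python
from anthropic import Anthropic

client = Anthropic()
# Two labeled examples teach the format; the third review is the real task
few_shot_prompt = """Classify each product review as POSITIVE or NEGATIVE.

Review: This mug keeps my coffee hot for hours. Love it!
Classification: POSITIVE

Review: The handle snapped off after two days.
Classification: NEGATIVE

Review: Shipping was fast and the fabric feels great.
Classification:"""
response = client.messages.create(
    model="claude-3-haiku-20240307",
    max_tokens=10,
    messages=[{"role": "user", "content": few_shot_prompt}]
)
print(response.content[0].text.strip())  # Expected: POSITIVE
```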
## Prompt Evaluations - Simple Code-Graded Assessment
Create automated evaluations that compare Claude's outputs against expected answers using Python functions.
```python
from anthropic import Anthropic
client = Anthropic()
# Test cases with expected outputs
test_cases = [
{"input": "A dog runs through the yard", "expected": 4},
{"input": "The spider climbed up the wall", "expected": 8},
{"input": "A snake slithers across the sand", "expected": 0},
{"input": "The bird flew to its nest", "expected": 2}
]
def simple_prompt(animal_statement):
return f"""You will be provided a statement about an animal and your job is to determine how many legs that animal has.
Here is the animal statement.
{animal_statement}
How many legs does the animal have? Please respond with a number"""
def evaluate_prompt(prompt_func, test_cases):
correct = 0
total = len(test_cases)
for test in test_cases:
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=100,
messages=[{"role": "user", "content": prompt_func(test["input"])}]
)
try:
answer = int(response.content[0].text.strip())
if answer == test["expected"]:
correct += 1
print(f"✓ {test['input']}: {answer} (correct)")
else:
print(f"✗ {test['input']}: {answer} (expected {test['expected']})")
except ValueError:
print(f"✗ {test['input']}: Invalid output")
accuracy = correct / total
print(f"\nAccuracy: {accuracy:.1%} ({correct}/{total})")
return accuracy
# Run evaluation
evaluate_prompt(simple_prompt, test_cases)
```
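The bare `int(...)` parse fails whenever Claude answers in a full sentence. One illustrative hardening (an assumption, not the course's grader) is to extract the first integer with a regex before comparing:
```python
import re

def extract_number(text):
    # Pull the first integer out of a possibly verbose response
    match = re.search(r"-?\d+", text)
    return int(match.group()) if match else None

print(extract_number("The dog has 4 legs."))  # 4
print(extract_number("no legs at all"))       # None
```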
## Prompt Evaluations - Model-Graded Evaluation
Use Claude itself to evaluate the quality of outputs based on multiple criteria with detailed scoring.
```python
import anthropic
import os
import json
def llm_eval(summary, article):
"""
Evaluate summary quality using Claude as a judge.
Returns average score and detailed evaluation.
"""
client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
prompt = f"""Evaluate the following summary based on these criteria:
1. Conciseness (1-5) - is the summary as concise as possible?
2. Accuracy (1-5) - is the summary completely accurate based on the initial article?
3. Tone (1-5) - is the summary appropriate for a grade school student?
Provide your scores in JSON format inside <eval> tags:
<eval>
{{
    "conciseness": <int 1-5>,
    "accuracy": <int 1-5>,
    "tone": <int 1-5>,
    "explanation": "<one-sentence rationale>"
}}
</eval>
Original Text: {article}
Summary to Evaluate: {summary}
"""
response = client.messages.create(
model="claude-3-5-sonnet-20240620",
max_tokens=1000,
temperature=0,
        messages=[
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": "<eval>"}
        ],
        stop_sequences=["</eval>"]
)
evaluation = json.loads(response.content[0].text)
numeric_values = [value for key, value in evaluation.items() if isinstance(value, (int, float))]
avg_score = sum(numeric_values) / len(numeric_values)
return avg_score, evaluation
# Example usage
article = "Artificial neural networks are computational models inspired by the human brain..."
summary = "Neural networks are computer systems that learn from examples, similar to how humans learn."
score, eval_details = llm_eval(summary, article)
print(f"Average Score: {score:.2f}/5.0")
print(f"Evaluation: {json.dumps(eval_details, indent=2)}")
```
## Promptfoo Integration - YAML Configuration
Configure evaluation suites using promptfoo for systematic prompt testing with multiple models and graders.
```yaml
description: "Animal Legs Eval"
prompts:
- prompts.py:simple_prompt
- prompts.py:better_prompt
- prompts.py:chain_of_thought_prompt
providers:
- anthropic:messages:claude-3-haiku-20240307
- anthropic:messages:claude-3-5-sonnet-20240620
tests: animal_legs_tests.csv
defaultTest:
options:
transform: file://transform.py
```
```python
# prompts.py - Define prompt variants (better_prompt, referenced in the YAML above, is omitted here for brevity)
def simple_prompt(animal_statement):
return f"""You will be provided a statement about an animal and your job is to determine how many legs that animal has.
Here is the animal statement.
{animal_statement}
How many legs does the animal have? Please respond with a number"""
def chain_of_thought_prompt(animal_statement):
return f"""You will be provided a statement about an animal and your job is to determine how many legs that animal has.
Here is the animal statement.
{animal_statement}
How many legs does the animal have?
Start by reasoning about the number of legs the animal has, thinking step by step inside of <thinking> tags.
Then, output your final answer inside of <answer> tags.
Inside the <answer> tags return just the number of legs as an integer and nothing else."""
# Run evaluation from command line:
# promptfoo eval
```
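The config references two files it doesn't show. Hedged sketches, assuming promptfoo's Python transform convention (a `get_transform(output, context)` function) and that the chain-of-thought prompt wraps its final answer in `<answer>` tags as instructed above:
```python
# transform.py - strip chain-of-thought reasoning so graders compare only the answer
import re

def get_transform(output, context):
    match = re.search(r"<answer>(.*?)</answer>", output, re.DOTALL)
    return match.group(1).strip() if match else output.strip()
```
The test CSV maps one column per prompt variable plus an `__expected` column for the assertion; a plausible excerpt:
```csv
animal_statement,__expected
A dog runs through the yard,4
The spider climbed up the wall,8
A snake slithers across the sand,0
```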
## Streaming Responses - Real-time Token Generation
Stream Claude's responses token-by-token for better user experience in interactive applications.
```python
from anthropic import Anthropic
client = Anthropic()
# Basic streaming
print("Streaming response: ", end="", flush=True)
with client.messages.stream(
model="claude-3-haiku-20240307",
max_tokens=1000,
messages=[{"role": "user", "content": "Write a short story about a robot."}]
) as stream:
for text in stream.text_stream:
print(text, end="", flush=True)
print("\n")
# Streaming with event handling
with client.messages.stream(
model="claude-3-haiku-20240307",
max_tokens=500,
messages=[{"role": "user", "content": "Count from 1 to 5."}]
) as stream:
for event in stream:
if event.type == "content_block_start":
print("Claude started responding...")
elif event.type == "content_block_delta":
print(event.delta.text, end="", flush=True)
elif event.type == "message_stop":
print("\nClaude finished responding.")
# Get final message after streaming
final_message = stream.get_final_message()
print(f"Total tokens: {final_message.usage.output_tokens}")
```
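The same streaming helper exists on the SDK's async client, which suits web servers and other event-loop applications. A minimal sketch:
```python
import asyncio
from anthropic import AsyncAnthropic

async_client = AsyncAnthropic()

async def main():
    # Async variant of messages.stream for event-loop applications
    async with async_client.messages.stream(
        model="claude-3-haiku-20240307",
        max_tokens=500,
        messages=[{"role": "user", "content": "Write a limerick about streams."}]
    ) as stream:
        async for text in stream.text_stream:
            print(text, end="", flush=True)

asyncio.run(main())
```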
## Vision - Image Analysis with Claude
Send images to Claude for analysis, description, and visual question answering using base64 encoding or URLs.
```python
from anthropic import Anthropic
import base64
client = Anthropic()
# Load and encode image
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.standard_b64encode(image_file.read()).decode("utf-8")
image_data = encode_image("photo.jpg")
# Analyze image
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
messages=[{
"role": "user",
"content": [
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": image_data
}
},
{
"type": "text",
"text": "What's in this image? Please describe it in detail."
}
]
}]
)
print(response.content[0].text)
# Multiple images with comparison (encode two local photos first)
image1_data = encode_image("photo1.jpg")
image2_data = encode_image("photo2.jpg")
response = client.messages.create(
model="claude-3-haiku-20240307",
max_tokens=1000,
messages=[{
"role": "user",
"content": [
{"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image1_data}},
{"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": image2_data}},
{"type": "text", "text": "What are the differences between these two images?"}
]
}]
)
print(response.content[0].text)
```
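The section intro also mentions URL-based images: the API accepts a `url` source type in place of base64, so no local encoding is needed (the URL below is a placeholder):
```python
# Reference a hosted image by URL instead of embedding base64 data
response = client.messages.create(
    model="claude-3-haiku-20240307",
    max_tokens=1000,
    messages=[{
        "role": "user",
        "content": [
            {
                "type": "image",
                "source": {"type": "url", "url": "https://example.com/photo.jpg"}
            },
            {"type": "text", "text": "Describe this image in detail."}
        ]
    }]
)
print(response.content[0].text)
```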
The Anthropic courses repository provides a complete educational pathway for developers looking to master Claude AI integration. The main use cases include learning SDK fundamentals for API integration, mastering prompt engineering techniques for optimal model performance, implementing tool use for function calling capabilities, building evaluation frameworks for production quality assurance, and applying real-world prompting patterns to complex applications. Students progress through hands-on notebooks with immediate feedback, which makes the material well suited to self-paced learning.
Integration patterns follow industry best practices including secure API key management using environment variables, structured prompt formatting with XML tags for data separation, systematic evaluation pipelines for quality measurement, and streaming responses for interactive applications. The courses emphasize practical patterns such as chain-of-thought reasoning for complex tasks, role prompting for consistent behavior, few-shot learning with examples, and tool use schemas for extending Claude's capabilities. Whether building chatbots, content analysis systems, research assistants, or automated evaluation frameworks, these courses provide the foundational knowledge and code patterns needed for production Claude implementations.