A comprehensive guide for implementing powerful Azure AI Foundry templates
A web-based chat application that allows users to interact with Azure OpenAI models. It provides a foundation for building conversational AI experiences with optional Retrieval-Augmented Generation (RAG) capabilities.
A web-based chat application with an AI agent that can perform tasks and retrieve information from uploaded files. It leverages Azure AI Agent service and Azure AI Search for knowledge retrieval.
A solution that helps organizations extract actionable insights from large volumes of conversational data by identifying key themes, patterns, and relationships in unstructured dialogue.
Install the required tools:
# Install Azure Developer CLI
curl -fsSL https://aka.ms/install-azd.sh | bash
# Install Azure CLI
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
# Ensure Docker is installed
docker --version
git clone https://github.com/Azure-Samples/get-started-with-ai-chat.git
cd get-started-with-ai-chat
# Login to Azure
azd auth login
# Initialize the project with a new environment name
azd init --environment myaichat
# Deploy all resources to Azure
azd up
The application consists of:
# Located in the src/api directory.
@router.post("/chat")
async def chat_endpoint(request: ChatRequest) -> ChatResponse:
    """
    Process a chat request and return a response from the AI model.
    """
    try:
        # Resolve the configured chat client and forward the request
        # parameters straight through to the completion call.
        chat_client = get_chat_client()
        completion = await chat_client.get_chat_completion(
            messages=request.messages,
            temperature=request.temperature,
            top_p=request.top_p,
            max_tokens=request.max_tokens,
            stream=request.stream,
        )
        return ChatResponse(message=completion.message, usage=completion.usage)
    except Exception as exc:
        # Endpoint boundary: log the failure and surface it as an HTTP 500.
        logger.error(f"Error in chat endpoint: {str(exc)}")
        raise HTTPException(status_code=500, detail=str(exc))
Update the deployment parameters in the infra/main.bicep file:
// Find the AI model deployment section
// Deploys an OpenAI model into the existing Cognitive Services account
// (referenced via `parent: openAI`). Both the deployment name and the
// model name are supplied as template parameters.
resource openAIDeployment 'Microsoft.CognitiveServices/accounts/deployments@2023-05-01' = {
parent: openAI
// Deployment name, taken from the template parameter.
name: openAIModelDeploymentName
properties: {
model: {
format: 'OpenAI'
name: openAIModelName // Change this to your preferred model
}
// Other properties...
}
}
Update the infra/main.parameters.json file:
{
"parameters": {
"enableSearch": {
"value": true
}
}
}
# Get the application URL
azd show --urls
# View application logs
azd monitor
# Remove all deployed resources
azd down
Install the required tools:
# Install Azure Developer CLI
curl -fsSL https://aka.ms/install-azd.sh | bash
# Install Azure CLI
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
# Ensure Docker is installed
docker --version
git clone https://github.com/Azure-Samples/get-started-with-ai-agents.git
cd get-started-with-ai-agents
# Login to Azure
azd auth login
# Initialize the project with a new environment name
azd init --environment myaiagent
# Deploy all resources to Azure
azd up
The application consists of:
# Located in the src/api directory (frontend assets in src/frontend,
# sample data in src/data).
@router.post("/agent")
async def agent_endpoint(request: AgentRequest) -> AgentResponse:
    """
    Process an agent request and return a response.
    """
    try:
        agent_client = get_agent_client()
        # Run the agent with the registered tool set.
        result = await agent_client.run_agent(
            messages=request.messages,
            tools=get_available_tools(),
            temperature=request.temperature,
        )
        return AgentResponse(
            message=result.message,
            citations=result.citations,
            usage=result.usage,
        )
    except Exception as exc:
        # Endpoint boundary: log the failure and surface it as an HTTP 500.
        logger.error(f"Error in agent endpoint: {str(exc)}")
        raise HTTPException(status_code=500, detail=str(exc))
@router.post("/upload")
async def upload_file(file: UploadFile) -> FileUploadResponse:
    """
    Upload a file for knowledge retrieval.
    """
    try:
        # Persist the upload first, then make its content searchable.
        stored_id = await save_file(file)
        await index_file_content(stored_id, file)
        return FileUploadResponse(
            file_id=stored_id,
            status="success",
            message="File uploaded and indexed successfully",
        )
    except Exception as exc:
        logger.error(f"Error uploading file: {str(exc)}")
        raise HTTPException(status_code=500, detail=str(exc))
Customize the tools available to the agent in src/api/tools/tool_registry.py:
def get_available_tools() -> List[Tool]:
    """
    Return the list of tools available to the agent.
    """
    # Built-in knowledge-base search backed by the indexed documents.
    knowledge_search = Tool(
        name="search_knowledge_base",
        description="Search the knowledge base for information",
        parameters={
            "query": {
                "type": "string",
                "description": "The search query",
            }
        },
        function=search_knowledge_base,
    )
    tools = [
        knowledge_search,
        # Add your custom tools here
    ]
    return tools
# Get the application URL
azd show --urls
To test the agent with file uploads:
# View application logs
azd monitor
# Remove all deployed resources
azd down
git clone https://github.com/microsoft/Conversation-Knowledge-Mining-Solution-Accelerator.git
cd Conversation-Knowledge-Mining-Solution-Accelerator
Before deployment, familiarize yourself with the solution architecture:
# Login to Azure
azd auth login
# Initialize the project with a new environment name
azd init --environment myconvomining
# Deploy all resources to Azure
azd up
The application consists of:
# Located in the src/api directory (web frontend in src/App).
def process_conversation(conversation_data: dict) -> dict:
    """
    Process a conversation to extract insights.

    Enriches the conversation text with entities, key phrases, sentiment,
    and vector embeddings, transcribing any attached audio first, then
    stores and returns the processed record.

    :param conversation_data: Raw conversation dict; recognized keys are
        "id", "text", "audio_url", and "metadata".
    :return: The processed-conversation dict that was stored.
    """
    conversation_id = conversation_data.get("id", str(uuid.uuid4()))
    text_content = conversation_data.get("text", "")

    # Default enrichments so the record is well-formed even when the
    # conversation has no text. (Previously these names were only bound
    # inside the `if text_content:` branch but referenced unconditionally
    # below, raising NameError for empty conversations.)
    entities, key_phrases, sentiment, embeddings = [], [], None, []

    if text_content:
        entities, key_phrases, sentiment, embeddings = _analyze_text(text_content)

    # Transcribe audio content if available, then enrich the transcript
    # with the same pipeline so the stored fields match the stored text.
    audio_url = conversation_data.get("audio_url")
    if audio_url:
        transcription = transcribe_audio(audio_url)
        text_content = transcription.text
        entities, key_phrases, sentiment, embeddings = _analyze_text(text_content)

    processed_data = {
        "id": conversation_id,
        "text": text_content,
        "entities": entities,
        "key_phrases": key_phrases,
        "sentiment": sentiment,
        "embeddings": embeddings,
        "metadata": conversation_data.get("metadata", {}),
    }

    # Store in database
    store_processed_conversation(processed_data)
    return processed_data


def _analyze_text(text_content: str):
    """Run entity, key-phrase, sentiment, and embedding extraction over text."""
    entities = extract_entities(text_content)
    key_phrases = extract_key_phrases(text_content)
    sentiment = analyze_sentiment(text_content)
    embeddings = generate_embeddings(text_content)
    return entities, key_phrases, sentiment, embeddings
@router.post("/query")
async def query_conversations(request: QueryRequest) -> QueryResponse:
    """
    Process a natural language query about conversations.
    """
    try:
        client = get_query_client()
        question = request.question
        # Embed the question and retrieve the closest conversations.
        question_embedding = generate_embeddings(question)
        matches = vector_search(
            embedding=question_embedding,
            top_k=request.top_k or 5,
        )
        # Ask Azure OpenAI to answer grounded in the retrieved context.
        answer = generate_response(
            query=question,
            context=matches,
            temperature=request.temperature or 0.7,
        )
        return QueryResponse(
            answer=answer,
            citations=format_citations(matches),
            conversations=matches,
        )
    except Exception as exc:
        logger.error(f"Error processing query: {str(exc)}")
        raise HTTPException(status_code=500, detail=str(exc))
Modify the entity extraction configuration in src/api/services/entity_extraction.py:
def configure_custom_entity_types():
    """
    Configure custom entity types for extraction.
    """
    def _entity(name, description, examples):
        # Each entity type is a plain dict consumed by the extraction service.
        return {"name": name, "description": description, "examples": examples}

    return [
        _entity(
            "ProductName",
            "Names of products mentioned in conversations",
            ["Product A", "Product B", "Service X"],
        ),
        _entity(
            "IssueType",
            "Types of issues mentioned in conversations",
            ["login problem", "payment failure", "slow performance"],
        ),
        # Add your custom entity types here
    ]
# Navigate to the sample data directory
cd workshop/data
# Run the data loading script
python load_sample_data.py
# Get the application URL
azd show --urls
To test the natural language query interface:
# View application logs
azd monitor
# Remove all deployed resources
azd down