adding files for bedrock-converse

This commit is contained in:
Elie Schoppik
2024-11-26 12:58:55 -05:00
parent b4f26aedef
commit 0a2d2dcfeb
76 changed files with 3213 additions and 34088 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -34,12 +34,14 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from anthropic import Anthropic\n",
"client = Anthropic()"
"import boto3\n",
"\n",
"bedrock_client = boto3.client(service_name='bedrock-runtime', region_name=\"us-west-2\")\n",
"model_id = \"anthropic.claude-3-5-sonnet-20241022-v2:0\""
]
},
{
@@ -57,7 +59,7 @@
},
{
"cell_type": "code",
"execution_count": 137,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -65,17 +67,21 @@
" print(f\"pretending to search the web for {topic}\")\n",
"\n",
"web_search_tool = {\n",
" \"name\": \"web_search\",\n",
" \"description\": \"A tool to retrieve up to date information on a given topic by searching the web\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"topic\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The topic to search the web for\"\n",
" },\n",
" },\n",
" \"required\": [\"topic\"]\n",
" \"toolSpec\": {\n",
" \"name\": \"web_search\",\n",
" \"description\": \"A tool to retrieve up to date information on a given topic by searching the web\",\n",
" \"inputSchema\": {\n",
" \"json\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"topic\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The topic to search the web for\"\n",
" },\n",
" },\n",
" \"required\": [\"topic\"]\n",
" }\n",
" }\n",
" }\n",
"}\n"
]
@@ -91,7 +97,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -107,14 +113,13 @@
},
{
"cell_type": "code",
"execution_count": 145,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from datetime import date\n",
"\n",
"def chat_with_web_search(user_query):\n",
" messages = [{\"role\": \"user\", \"content\": user_query}]\n",
"\n",
" system_prompt=f\"\"\"\n",
" Answer as many questions as you can using your existing knowledge. \n",
@@ -123,19 +128,25 @@
" If you think a user's question involves something in the future that hasn't happened yet, use the search tool.\n",
" \"\"\"\n",
"\n",
" response = client.messages.create(\n",
" system=system_prompt,\n",
" model=\"claude-3-sonnet-20240229\",\n",
" messages = [{\"role\": \"user\", \"content\": [{\"text\": user_query}]}]\n",
"\n",
" inference_config={\"maxTokens\":400}\n",
" tool_config = {\"tools\":[web_search_tool], \"toolChoice\": {\"auto\": {}}}\n",
"\n",
" # Send the message.\n",
" response = bedrock_client.converse(\n",
" modelId=model_id,\n",
" messages=messages,\n",
" max_tokens=1000,\n",
" tool_choice={\"type\": \"auto\"},\n",
" tools=[web_search_tool]\n",
" system=[{\"text\": system_prompt}],\n",
" inferenceConfig=inference_config,\n",
" toolConfig=tool_config,\n",
" )\n",
" last_content_block = response.content[-1]\n",
" if last_content_block.type == \"text\":\n",
"\n",
" last_content_block = response[\"output\"][\"message\"][\"content\"][-1]\n",
" if \"text\" in last_content_block:\n",
" print(\"Claude did NOT call a tool\")\n",
" print(f\"Assistant: {last_content_block.text}\")\n",
" elif last_content_block.type == \"tool_use\":\n",
" print(f\"Assistant: {last_content_block['text']}\")\n",
" if \"toolUse\" in last_content_block:\n",
" print(\"Claude wants to use a tool\")\n",
" print(last_content_block)"
]
@@ -149,7 +160,7 @@
},
{
"cell_type": "code",
"execution_count": 139,
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -157,7 +168,15 @@
"output_type": "stream",
"text": [
"Claude did NOT call a tool\n",
"Assistant: The sky appears blue during the day. This is because the Earth's atmosphere scatters more blue light from the sun than other colors, making the sky look blue.\n"
"Assistant: I can answer this question from my general knowledge without needing to search the web.\n",
"\n",
"The sky appears blue during clear daytime conditions due to a phenomenon called Rayleigh scattering. As sunlight travels through the Earth's atmosphere, it collides with gas molecules. These molecules scatter the light in all directions. Blue light is scattered more than other colors because it travels as shorter, smaller waves. This is why we see a blue sky most of the time during the day.\n",
"\n",
"However, the sky can appear different colors depending on conditions and time:\n",
"- At sunrise and sunset, the sky often appears red, orange, or pink\n",
"- During stormy weather, it may appear grey\n",
"- At night, it appears dark or black\n",
"- In some locations, pollution can affect the sky's appearance\n"
]
}
],
@@ -174,7 +193,7 @@
},
{
"cell_type": "code",
"execution_count": 140,
"execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -182,7 +201,7 @@
"output_type": "stream",
"text": [
"Claude wants to use a tool\n",
"ToolUseBlock(id='toolu_staging_018nwaaRebX33pHqoZZXDaSw', input={'topic': '2024 Miami Grand Prix winner'}, name='web_search', type='tool_use')\n"
"{'toolUse': {'toolUseId': 'tooluse_gzgDZ3KsTJS4BTi6oX2wzg', 'name': 'web_search', 'input': {'topic': 'Who won the 2024 Miami F1 Miami Grand Prix winner'}}}\n"
]
}
],
@@ -201,7 +220,7 @@
},
{
"cell_type": "code",
"execution_count": 141,
"execution_count": 7,
"metadata": {},
"outputs": [
{
@@ -209,7 +228,9 @@
"output_type": "stream",
"text": [
"Claude did NOT call a tool\n",
"Assistant: The Los Angeles Rams won Super Bowl LVI in 2022, defeating the Cincinnati Bengals by a score of 23-20. The game was played on February 13, 2022 at SoFi Stadium in Inglewood, California.\n"
"Assistant: I can answer this without needing to search, as it's a past event.\n",
"\n",
"The Los Angeles Rams won Super Bowl LVI (56) on February 13, 2022, defeating the Cincinnati Bengals 23-20. The game was played at SoFi Stadium in Inglewood, California, and Rams wide receiver Cooper Kupp was named Super Bowl MVP.\n"
]
}
],
@@ -220,15 +241,15 @@
},
{
"cell_type": "code",
"execution_count": 144,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Claude wants to use a tool\n",
"ToolUseBlock(id='toolu_staging_016XPwcprHAgYJBtN7A3jLhb', input={'topic': '2024 Super Bowl winner'}, name='web_search', type='tool_use')\n"
"Claude did NOT call a tool\n",
"Assistant: The Kansas City Chiefs won Super Bowl LVIII (58) on February 11, 2024, defeating the San Francisco 49ers 25-22 in overtime at Allegiant Stadium in Las Vegas, Nevada. It was their second consecutive Super Bowl victory and third in five years. Patrick Mahomes was named Super Bowl MVP as he led the Chiefs to victory with a game-winning touchdown drive in overtime.\n"
]
}
],
@@ -248,7 +269,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -280,35 +301,42 @@
},
{
"cell_type": "code",
"execution_count": 111,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"\n",
"tools = [\n",
" {\n",
" \"name\": \"print_sentiment_scores\",\n",
" \"description\": \"Prints the sentiment scores of a given tweet or piece of text.\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"positive_score\": {\"type\": \"number\", \"description\": \"The positive sentiment score, ranging from 0.0 to 1.0.\"},\n",
" \"negative_score\": {\"type\": \"number\", \"description\": \"The negative sentiment score, ranging from 0.0 to 1.0.\"},\n",
" \"neutral_score\": {\"type\": \"number\", \"description\": \"The neutral sentiment score, ranging from 0.0 to 1.0.\"}\n",
" },\n",
" \"required\": [\"positive_score\", \"negative_score\", \"neutral_score\"]\n",
" \"toolSpec\": {\n",
" \"name\": \"print_sentiment_scores\",\n",
" \"description\": \"Prints the sentiment scores of a given tweet or piece of text.\",\n",
" \"inputSchema\": {\n",
" \"json\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"positive_score\": {\"type\": \"number\", \"description\": \"The positive sentiment score, ranging from 0.0 to 1.0.\"},\n",
" \"negative_score\": {\"type\": \"number\", \"description\": \"The negative sentiment score, ranging from 0.0 to 1.0.\"},\n",
" \"neutral_score\": {\"type\": \"number\", \"description\": \"The neutral sentiment score, ranging from 0.0 to 1.0.\"}\n",
" },\n",
" \"required\": [\"positive_score\", \"negative_score\", \"neutral_score\"]\n",
" }\n",
" }\n",
" }\n",
" },\n",
" {\n",
" \"name\": \"calculator\",\n",
" \"description\": \"Adds two number\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"num1\": {\"type\": \"number\", \"description\": \"first number to add\"},\n",
" \"num2\": {\"type\": \"number\", \"description\": \"second number to add\"},\n",
" },\n",
" \"required\": [\"num1\", \"num2\"]\n",
" \"toolSpec\": {\n",
" \"name\": \"calculator\",\n",
" \"description\": \"Adds two number\",\n",
" \"inputSchema\": {\n",
" \"json\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"num1\": {\"type\": \"number\", \"description\": \"first number to add\"},\n",
" \"num2\": {\"type\": \"number\", \"description\": \"second number to add\"},\n",
" },\n",
" \"required\": [\"num1\", \"num2\"]\n",
" }\n",
" }\n",
" }\n",
" }\n",
"]"
@@ -325,11 +353,11 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"tool_choice={\"type\": \"auto\"}"
"tool_choice={\"auto\": {}}"
]
},
{
@@ -341,19 +369,22 @@
},
{
"cell_type": "code",
"execution_count": 124,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"def analyze_tweet_sentiment(query):\n",
" response = client.messages.create(\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=4096,\n",
" tools=tools,\n",
" tool_choice={\"type\": \"auto\"},\n",
" messages=[{\"role\": \"user\", \"content\": query}]\n",
"\n",
" response = bedrock_client.converse(\n",
" modelId=model_id,\n",
" messages=[{\"role\": \"user\", \"content\": [{\"text\": query}]}],\n",
" system=[{\"text\": system_prompt}],\n",
" inferenceConfig={\"maxTokens\":4096},\n",
" toolConfig={\"tools\": tools, \"toolChoice\": {\"auto\": {}}},\n",
" )\n",
" print(response)\n"
"\n",
" print(response)\n",
"\n"
]
},
{
@@ -365,14 +396,14 @@
},
{
"cell_type": "code",
"execution_count": 125,
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ToolsBetaMessage(id='msg_staging_01ApgXx7W7qsDugdaRWh6p21', content=[TextBlock(text=\"That's great to hear! I don't actually have the capability to assess sentiment from text, but it sounds like you're really excited and proud of the incredible meal you made. Cooking something delicious that you're proud of can definitely give a sense of accomplishment and happiness. Well done on creating such an amazing dish!\", type='text')], model='claude-3-sonnet-20240229', role='assistant', stop_reason='end_turn', stop_sequence=None, type='message', usage=Usage(input_tokens=429, output_tokens=69))\n"
"{'ResponseMetadata': {'RequestId': '4f4ecfc1-780e-40ba-a7d7-e79f8dd09ff6', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Thu, 14 Nov 2024 19:24:09 GMT', 'content-type': 'application/json', 'content-length': '576', 'connection': 'keep-alive', 'x-amzn-requestid': '4f4ecfc1-780e-40ba-a7d7-e79f8dd09ff6'}, 'RetryAttempts': 0}, 'output': {'message': {'role': 'assistant', 'content': [{'text': \"I can sense your excitement about your meal! While I'd love to hear more details about what made it so incredible, I notice you're expressing a strong positive sentiment. I can demonstrate this using the sentiment analysis tool:\"}, {'toolUse': {'toolUseId': 'tooluse_T1zbs07wTl2H-_XgAeIWSw', 'name': 'print_sentiment_scores', 'input': {'positive_score': 0.9, 'negative_score': 0.0, 'neutral_score': 0.1}}}]}}, 'stopReason': 'tool_use', 'usage': {'inputTokens': 650, 'outputTokens': 146, 'totalTokens': 796}, 'metrics': {'latencyMs': 3513}}\n"
]
}
],
@@ -392,14 +423,14 @@
},
{
"cell_type": "code",
"execution_count": 128,
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ToolsBetaMessage(id='msg_staging_018gTrwrx6YwBR2jjhdPooVg', content=[TextBlock(text=\"That's wonderful that you love your cats and adopted two more! To figure out how many cats you have now, I can use the calculator tool:\", type='text'), ToolUseBlock(id='toolu_staging_01RFker5oMQoY6jErz5prmZg', input={'num1': 4, 'num2': 2}, name='calculator', type='tool_use')], model='claude-3-sonnet-20240229', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=Usage(input_tokens=442, output_tokens=101))\n"
"{'ResponseMetadata': {'RequestId': 'cf9bed41-bf7b-4e79-9138-43d4493c6df7', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Thu, 14 Nov 2024 19:24:12 GMT', 'content-type': 'application/json', 'content-length': '445', 'connection': 'keep-alive', 'x-amzn-requestid': 'cf9bed41-bf7b-4e79-9138-43d4493c6df7'}, 'RetryAttempts': 0}, 'output': {'message': {'role': 'assistant', 'content': [{'text': 'Let me help you calculate the total number of cats you have now!\\n\\nYou had 4 cats and adopted 2 more, so let me use the calculator to add these numbers:'}, {'toolUse': {'toolUseId': 'tooluse_Dtmcx1X3QnO4Fb2Kch3mZA', 'name': 'calculator', 'input': {'num1': 4, 'num2': 2}}}]}}, 'stopReason': 'tool_use', 'usage': {'inputTokens': 663, 'outputTokens': 110, 'totalTokens': 773}, 'metrics': {'latencyMs': 2767}}\n"
]
}
],
@@ -410,11 +441,7 @@
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Claude wants to call the calculator tool:\n",
"\n",
"> ToolUseBlock(id='toolu_staging_01RFker5oMQoY6jErz5prmZg', input={'num1': 4, 'num2': 2}, name='calculator', type='tool_use')"
]
"source": []
},
{
"cell_type": "markdown",
@@ -427,11 +454,11 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"tool_choice={\"type\": \"tool\", \"name\": \"print_sentiment_scores\"}"
"toolConfig={\"tools\": tools, \"toolChoice\": {\"tool\": {\"name\":\"print_sentiment_scores\"}}}"
]
},
{
@@ -443,19 +470,22 @@
},
{
"cell_type": "code",
"execution_count": 132,
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"def analyze_tweet_sentiment(query):\n",
" response = client.messages.create(\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=4096,\n",
" tools=tools,\n",
" tool_choice={\"type\": \"tool\", \"name\": \"print_sentiment_scores\"},\n",
" messages=[{\"role\": \"user\", \"content\": query}]\n",
"\n",
" response = bedrock_client.converse(\n",
" modelId=model_id,\n",
" messages=[{\"role\": \"user\", \"content\": [{\"text\": query}]}],\n",
" system=[{\"text\": system_prompt}],\n",
" inferenceConfig={\"maxTokens\":4096},\n",
" toolConfig={\"tools\": tools, \"toolChoice\": {\"tool\": {\"name\":\"print_sentiment_scores\"}}},\n",
" )\n",
" print(response)"
"\n",
" print(response)\n",
"\n"
]
},
{
@@ -467,14 +497,14 @@
},
{
"cell_type": "code",
"execution_count": 133,
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ToolsBetaMessage(id='msg_staging_018GtYk8Xvee3w8Eeh6pbgoq', content=[ToolUseBlock(id='toolu_staging_01FMRQ9pZniZqFUGQwTcFU4N', input={'positive_score': 0.9, 'negative_score': 0.0, 'neutral_score': 0.1}, name='print_sentiment_scores', type='tool_use')], model='claude-3-sonnet-20240229', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=Usage(input_tokens=527, output_tokens=79))\n"
"{'ResponseMetadata': {'RequestId': '46d415e5-598b-4d4f-9296-6ccd2f20d5ee', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Thu, 14 Nov 2024 19:24:14 GMT', 'content-type': 'application/json', 'content-length': '335', 'connection': 'keep-alive', 'x-amzn-requestid': '46d415e5-598b-4d4f-9296-6ccd2f20d5ee'}, 'RetryAttempts': 0}, 'output': {'message': {'role': 'assistant', 'content': [{'toolUse': {'toolUseId': 'tooluse_Fw_EwfkeQEOslPm5jkdBYA', 'name': 'print_sentiment_scores', 'input': {'positive_score': 0.9, 'negative_score': 0.0, 'neutral_score': 0.1}}}]}}, 'stopReason': 'tool_use', 'usage': {'inputTokens': 658, 'outputTokens': 79, 'totalTokens': 737}, 'metrics': {'latencyMs': 1747}}\n"
]
}
],
@@ -495,14 +525,14 @@
},
{
"cell_type": "code",
"execution_count": 134,
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ToolsBetaMessage(id='msg_staging_01RACamfrHdpvLxWaNwDfZEF', content=[ToolUseBlock(id='toolu_staging_01Wb6ZKSwKvqVSKLDAte9cKU', input={'positive_score': 0.8, 'negative_score': 0.0, 'neutral_score': 0.2}, name='print_sentiment_scores', type='tool_use')], model='claude-3-sonnet-20240229', role='assistant', stop_reason='tool_use', stop_sequence=None, type='message', usage=Usage(input_tokens=540, output_tokens=79))\n"
"{'ResponseMetadata': {'RequestId': 'e0f45f28-9e1b-4eaa-af95-3596f681d542', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Thu, 14 Nov 2024 19:24:17 GMT', 'content-type': 'application/json', 'content-length': '443', 'connection': 'keep-alive', 'x-amzn-requestid': 'e0f45f28-9e1b-4eaa-af95-3596f681d542'}, 'RetryAttempts': 0}, 'output': {'message': {'role': 'assistant', 'content': [{'toolUse': {'toolUseId': 'tooluse_JWZGijmlQNuUkjOOYzQrYA', 'name': 'print_sentiment_scores', 'input': {'positive_score': 0.9, 'negative_score': 0.0, 'neutral_score': 0.1}}}, {'toolUse': {'toolUseId': 'tooluse_7NRLuN0kQlqrRWxvnmFieA', 'name': 'calculator', 'input': {'num1': 4, 'num2': 2}}}]}}, 'stopReason': 'tool_use', 'usage': {'inputTokens': 671, 'outputTokens': 132, 'totalTokens': 803}, 'metrics': {'latencyMs': 2518}}\n"
]
}
],
@@ -519,7 +549,7 @@
},
{
"cell_type": "code",
"execution_count": 135,
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
@@ -530,13 +560,14 @@
" <tweet>{query}</tweet>\n",
" \"\"\"\n",
" \n",
" response = client.messages.create(\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=4096,\n",
" tools=tools,\n",
" tool_choice={\"type\": \"auto\"},\n",
" messages=[{\"role\": \"user\", \"content\": prompt}]\n",
" response = bedrock_client.converse(\n",
" modelId=model_id,\n",
" messages=[{\"role\": \"user\", \"content\": [{\"text\": prompt}]}],\n",
    "        system=[{\"text\": system_prompt}],\n",
" inferenceConfig={\"maxTokens\":4096},\n",
    "        toolConfig={\"tools\": tools, \"toolChoice\": {\"auto\": {}}}\n",
" )\n",
"\n",
" print(response)"
]
},
@@ -564,16 +595,16 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"tool_choice={\"type\": \"any\"}"
"toolConfig={\"tools\": tools, \"toolChoice\": {\"any\": {}}}"
]
},
{
"cell_type": "code",
"execution_count": 162,
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
@@ -595,25 +626,33 @@
"\n",
"tools = [\n",
" {\n",
" \"name\": \"send_text_to_user\",\n",
" \"description\": \"Sends a text message to a user\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"text\": {\"type\": \"string\", \"description\": \"The piece of text to be sent to the user via text message\"},\n",
" },\n",
" \"required\": [\"text\"]\n",
" \"toolSpec\": { \n",
" \"name\": \"send_text_to_user\",\n",
" \"description\": \"Sends a text message to a user\",\n",
" \"inputSchema\": {\n",
" \"json\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"text\": {\"type\": \"string\", \"description\": \"The piece of text to be sent to the user via text message\"},\n",
" },\n",
" \"required\": [\"text\"]\n",
" }\n",
" }\n",
" }\n",
" },\n",
" {\n",
" \"name\": \"get_customer_info\",\n",
" \"description\": \"gets information on a customer based on the customer's username. Response includes email, username, and previous purchases. Only call this tool once a user has provided you with their username\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"username\": {\"type\": \"string\", \"description\": \"The username of the user in question. \"},\n",
" },\n",
" \"required\": [\"username\"]\n",
" \"toolSpec\": {\n",
" \"name\": \"get_customer_info\",\n",
" \"description\": \"gets information on a customer based on the customer's username. Response includes email, username, and previous purchases. Only call this tool once a user has provided you with their username\",\n",
" \"inputSchema\": {\n",
" \"json\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"username\": {\"type\": \"string\", \"description\": \"The username of the user in question. \"},\n",
" },\n",
" \"required\": [\"username\"]\n",
" }\n",
" }\n",
" }\n",
" },\n",
"]\n",
@@ -626,21 +665,21 @@
"\"\"\"\n",
"\n",
"def sms_chatbot(user_message):\n",
" messages = [{\"role\": \"user\", \"content\":user_message}]\n",
"\n",
" response = client.messages.create(\n",
" system=system_prompt,\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=4096,\n",
" tools=tools,\n",
" tool_choice={\"type\": \"any\"},\n",
" messages=messages\n",
" messages = [{\"role\": \"user\", \"content\":[{\"text\": user_message}]}]\n",
" \n",
" response = bedrock_client.converse(\n",
" modelId=model_id,\n",
" messages=messages,\n",
" system=[{\"text\": system_prompt}],\n",
" inferenceConfig={\"maxTokens\":4096},\n",
" toolConfig={\"tools\": tools, \"toolChoice\": {\"any\": {}}},\n",
" )\n",
" if response.stop_reason == \"tool_use\":\n",
" last_content_block = response.content[-1]\n",
" if last_content_block.type == 'tool_use':\n",
" tool_name = last_content_block.name\n",
" tool_inputs = last_content_block.input\n",
"\n",
" if response['stopReason'] == \"tool_use\":\n",
" last_content_block = response[\"output\"][\"message\"][\"content\"][-1]\n",
" if \"toolUse\" in last_content_block:\n",
" tool_name = last_content_block[\"toolUse\"][\"name\"]\n",
" tool_inputs = last_content_block[\"toolUse\"][\"input\"]\n",
" print(f\"=======Claude Wants To Call The {tool_name} Tool=======\")\n",
" if tool_name == \"send_text_to_user\":\n",
" send_text_to_user(tool_inputs[\"text\"])\n",
@@ -663,7 +702,7 @@
},
{
"cell_type": "code",
"execution_count": 163,
"execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -671,7 +710,7 @@
"output_type": "stream",
"text": [
"=======Claude Wants To Call The send_text_to_user Tool=======\n",
"TEXT MESSAGE SENT: Hello! I'm doing well, thanks for asking. How can I assist you today?\n"
"TEXT MESSAGE SENT: Hello! I'm doing well, thank you for asking. I'm here to help you today. Is there something specific I can assist you with? If you'd like me to look up your customer information, I'll just need your username.\n"
]
}
],
@@ -690,7 +729,7 @@
},
{
"cell_type": "code",
"execution_count": 164,
"execution_count": 23,
"metadata": {},
"outputs": [
{
@@ -698,7 +737,7 @@
"output_type": "stream",
"text": [
"=======Claude Wants To Call The send_text_to_user Tool=======\n",
"TEXT MESSAGE SENT: Hi there, to look up your order details I'll need your username first. Can you please provide me with your username?\n"
"TEXT MESSAGE SENT: I'd be happy to help you look up your order information. Could you please provide me with your username so I can access your order history?\n"
]
}
],
@@ -717,7 +756,7 @@
},
{
"cell_type": "code",
"execution_count": 165,
"execution_count": 24,
"metadata": {},
"outputs": [
{
@@ -744,7 +783,7 @@
},
{
"cell_type": "code",
"execution_count": 166,
"execution_count": 25,
"metadata": {},
"outputs": [
{
@@ -752,7 +791,7 @@
"output_type": "stream",
"text": [
"=======Claude Wants To Call The send_text_to_user Tool=======\n",
"TEXT MESSAGE SENT: I'm afraid I didn't understand your query. Could you please rephrase what you need help with?\n"
"TEXT MESSAGE SENT: I apologize, but I couldn't understand your message. Could you please rephrase your question clearly? I'm here to help with customer information and sending messages. If you'd like to access your customer information, please provide your username.\n"
]
}
],
@@ -777,7 +816,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.6"
}
},
"nbformat": 4,