equilink-site/code_diagrams/tutorial_diagrams/2_ai_model_response.codediagram

{"id":-1,"name":"Onboarding diagram","userId":-1,"createdAt":"","updatedAt":"","content":{"items":[{"uid":"tXz3sfRHmC","position":{"x":-620,"y":180},"sizes":{"width":469.6875,"height":482.25},"autoheight":true,"blockContent":{"type":"doc","content":[{"type":"heading","attrs":{"level":1},"content":[{"type":"text","text":"ARAI Agent Basic"}]},{"type":"paragraph","content":[{"type":"text","text":"Simple way of sending a prompt and receiving a response from ai."}]},{"type":"paragraph"},{"type":"paragraph","content":[{"type":"text","text":"We use messages as a dictionary as it creates the habit of allowing for more complex prompts in the future."}]},{"type":"paragraph"},{"type":"paragraph","content":[{"type":"text","text":"This will come in handy for:"}]},{"type":"bulletList","content":[{"type":"listItem","content":[{"type":"paragraph","content":[{"type":"text","text":"Personality, Style or Persona."}]},{"type":"bulletList","content":[{"type":"listItem","content":[{"type":"paragraph","content":[{"type":"text","text":"Tells the AI what type of expert it is so it can draft a more accurate response."}]}]}]}]},{"type":"listItem","content":[{"type":"paragraph","content":[{"type":"text","text":"History [ ] "}]},{"type":"bulletList","content":[{"type":"listItem","content":[{"type":"paragraph","content":[{"type":"text","text":"for prompts and responses for ai to have context. "}]}]}]}]},{"type":"listItem","content":[{"type":"paragraph","content":[{"type":"text","text":"Memories []"}]},{"type":"bulletList","content":[{"type":"listItem","content":[{"type":"paragraph","content":[{"type":"text","text":"for vector db on most recent memories, and most relevant"}]}]}]}]}]},{"type":"paragraph"}]},"nodeType":"block"},{"uid":"zT1dxIBB8b","position":{"x":570,"y":570},"sizes":{"width":679.921875,"height":801.3125},"autoheight":true,"blockContent":{"type":"doc","content":[{"type":"filePathNode","attrs":{"pathToFile":"","version":1},"content":[{"type":"text","marks":[{"type":"bold"}],"text":"models\\gemini_model.py"}]},{"type":"codeBlock","attrs":{"language":"python","wrapCode":true},"content":[{"type":"text","text":" def generate_response_from_string(self, prompt, **kwargs):\n # Extract personality and style from kwargs, or use defaults from agent_template\n if kwargs:\n if \"personality\" in kwargs:\n personality = kwargs.get(\"personality\")\n if \"communication_style\" in kwargs:\n communication_style = kwargs.get(\"communication_style\")\n else:\n personality = \"\"\n communication_style = \"\" \n\n try:\n # instructions being sent to the ai model\n messages = []\n\n # add personality and style to the instructions\n if personality or communication_style:\n persona_prompt = f\"{personality} {communication_style}\"\n messages.append({\n \"role\": \"user\",\n \"parts\": [persona_prompt]\n })\n\n # user message\n messages.append({\n \"role\": \"user\",\n \"parts\": [prompt]\n })\n\n # Make sure that what is being sent to the model is correct\n # print(messages)\n\n # generate the response\n response = self.model.generate_content(messages)\n return response.text.strip()\n\n except Exception as e:\n return f\"Error generating response: 
{str(e)}\""}]}]},"nodeType":"block"},{"uid":"vg7Z0EZd1u","position":{"x":-640,"y":720},"sizes":{"width":509.6875,"height":293.8125},"autoheight":true,"blockContent":{"type":"doc","content":[{"type":"filePathNode","attrs":{"pathToFile":"","version":1},"content":[{"type":"text","marks":[{"type":"bold"}],"text":"models\\gemini_model.py"}]},{"type":"codeBlock","attrs":{"language":"python","wrapCode":true},"content":[{"type":"text","text":"messages = []\n\nmessages.append({\n \"role\": \"user\",\r\n \"parts\": \"what is the capital of france\"\n})