目录
- 理解 ContextMixin
- 什么是 ContextMixin?
- 主要组件
- 实现细节
- 测试 ContextMixin
- 示例:ModelX
- 1. 配置优先级
- 2. 多继承
- 3. 多继承重写
- 4. 配置优先级
在本文中,我们将探索 ContextMixin 类,它在多重继承场景中的集成及其在 Python 配置和上下文管理中的应用。此外,我们将通过测试验证其功能,以了解它如何简化模型配置的处理。让我们深入了解代码片段的详细解释。
理解 ContextMixin
什么是 ContextMixin?
ContextMixin
是一个用于高效管理上下文和配置的 Python 类。继承该类的模型或对象能够:
- 通过灵活的优先级规则处理上下文(
private_context
)和配置(private_config
)。 - 管理与 LLM(
private_llm
)实例的交互。 - 支持动态设置属性的覆盖机制。
主要组件
-
私有上下文和配置:
private_context
和private_config
被设计为内部属性,为每个实例提供灵活的作用域。
- 这些属性默认值为 None,但可以显式覆盖。
-
LLM 管理:
- 通过 private_llm 集成 LLM,支持从配置动态初始化。
实现细节
以下是核心 ContextMixin
类:
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from metagpt.config2 import Config
from metagpt.context import Context
from metagpt.provider.base_llm import BaseLLM
class ContextMixin(BaseModel):
    """Mixin class for context and config.

    Gives the inheriting model (Env/Role/Action) three per-instance
    ("private") slots — context, config, llm — each resolved with a
    private-first priority: the private value wins when set, otherwise the
    lookup falls back to the public side (``self.context`` /
    ``self.context.config``).
    """

    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    # Pydantic has bug on _private_attr when using inheritance, so we use private_* instead
    # - https://github.com/pydantic/pydantic/issues/7142
    # - https://github.com/pydantic/pydantic/issues/7083
    # - https://github.com/pydantic/pydantic/issues/7091

    # Env/Role/Action will use this context as private context, or use self.context as public context
    private_context: Optional[Context] = Field(default=None, exclude=True)
    # Env/Role/Action will use this config as private config, or use self.context.config as public config
    private_config: Optional[Config] = Field(default=None, exclude=True)
    # Env/Role/Action will use this llm as private llm, or use self.context._llm instance
    private_llm: Optional[BaseLLM] = Field(default=None, exclude=True)

    @model_validator(mode="after")
    def validate_context_mixin_extra(self):
        # Post-validation hook: route any extra constructor kwargs
        # (context/config/llm) into the private_* slots.
        self._process_context_mixin_extra()
        return self

    def _process_context_mixin_extra(self):
        """Process the extra field: pop context/config/llm out of model_extra."""
        kwargs = self.model_extra or {}
        self.set_context(kwargs.pop("context", None))
        self.set_config(kwargs.pop("config", None))
        self.set_llm(kwargs.pop("llm", None))

    def set(self, k, v, override=False):
        """Set attribute ``k`` to ``v``.

        Writes only when ``override`` is True or the current value is falsy
        (unset/None) — so a non-override set never clobbers an existing value.
        Goes through ``__dict__`` directly, bypassing pydantic validation.
        """
        if override or not self.__dict__.get(k):
            self.__dict__[k] = v

    def set_context(self, context: Context, override=True):
        """Set context. Note: overrides by default, unlike set_config/set_llm."""
        self.set("private_context", context, override)

    def set_config(self, config: Config, override=False):
        """Set config, then eagerly initialize the llm from it."""
        self.set("private_config", config, override)
        if config is not None:
            _ = self.llm  # init llm

    def set_llm(self, llm: BaseLLM, override=False):
        """Set llm (non-override: kept only if no llm is already present)."""
        self.set("private_llm", llm, override)

    @property
    def config(self) -> Config:
        """Role config: role config > context config"""
        if self.private_config:
            return self.private_config
        return self.context.config

    @config.setter
    def config(self, config: Config) -> None:
        """Set config (non-override semantics — see set_config)."""
        self.set_config(config)

    @property
    def context(self) -> Context:
        """Role context: role context > context"""
        if self.private_context:
            return self.private_context
        # No private context: fall back to a fresh default Context.
        return Context()

    @context.setter
    def context(self, context: Context) -> None:
        """Set context (override semantics — see set_context)."""
        self.set_context(context)

    @property
    def llm(self) -> BaseLLM:
        """Role llm: if not existed, init from role.config"""
        # print(f"class:{self.__class__.__name__}({self.name}), llm: {self._llm}, llm_config: {self._llm_config}")
        if not self.private_llm:
            # Lazily build the llm from the effective config's llm section.
            self.private_llm = self.context.llm_with_cost_manager_from_llm_config(self.config.llm)
        return self.private_llm

    @llm.setter
    def llm(self, llm: BaseLLM) -> None:
        """Set llm directly, always replacing any existing instance."""
        self.private_llm = llm
ContextMixin
通过 Pydantic 进行模型验证和数据管理,在处理任意字段时提供了灵活性。
测试 ContextMixin
示例:ModelX
为了演示 ContextMixin
的工作原理,我们创建了一个简单的模型 ModelX
,继承自 ContextMixin
, 验证 ModelX
能正确继承默认属性,同时保留 ContextMixin
的功能。
ContextMixin
可以无缝集成到多重继承的层次结构中,
ModelY
结合了 ContextMixin
和 WTFMixin
,继承了两者的字段和功能。
class ModelX(ContextMixin, BaseModel):
    """Minimal model inheriting ContextMixin; adds two plain string fields."""

    a: str = "a"
    b: str = "b"
class WTFMixin(BaseModel):
    """Unrelated mixin used to exercise multiple inheritance alongside ContextMixin."""

    c: str = "c"
    d: str = "d"
class ModelY(WTFMixin, ModelX):
    """Multiple-inheritance model: combines WTFMixin's fields with ModelX's."""

    pass
def test_config_mixin_1():
    """ModelX keeps its own default field values while inheriting ContextMixin."""
    model = ModelX()
    assert (model.a, model.b) == ("a", "b")


test_config_mixin_1()
1. 配置优先级
from metagpt.configs.llm_config import LLMConfig

# Mock LLM config exercising app_id/api_secret/domain style fields.
# NOTE(review): llm_type does not appear in the printed Config dump below
# (api_type stays at its OPENAI default) — it presumably lands in pydantic's
# extra fields rather than a declared LLMConfig field; confirm against LLMConfig.
mock_llm_config = LLMConfig(
    llm_type="mock",
    api_key="mock_api_key",
    base_url="mock_base_url",
    app_id="mock_app_id",
    api_secret="mock_api_secret",
    domain="mock_domain",
)

# Same mock credentials but with an HTTP proxy set, to build a distinct Config.
mock_llm_config_proxy = LLMConfig(
    llm_type="mock",
    api_key="mock_api_key",
    base_url="mock_base_url",
    proxy="http://localhost:8080",
)
def test_config_mixin_2():
    """A config given at construction wins over a later non-override set_config."""
    first = Config(llm=mock_llm_config)
    second = Config(llm=mock_llm_config_proxy)
    instance = ModelX(config=first)
    assert instance.config == first
    assert instance.config.llm == mock_llm_config
    instance.set_config(second)
    # A private config already exists, so set_config without override is a no-op.
    assert instance.config == first


test_config_mixin_2()
2. 多继承
def test_config_mixin_3_multi_inheritance_not_override_config():
    """Test config mixin with multiple inheritance"""
    base_cfg = Config(llm=mock_llm_config)
    proxy_cfg = Config(llm=mock_llm_config_proxy)
    model = ModelY(config=base_cfg)
    assert model.config == base_cfg
    assert model.config.llm == mock_llm_config

    model.set_config(proxy_cfg)
    # model already holds a private config, so the non-override set is ignored
    assert model.config == base_cfg
    assert model.config.llm == mock_llm_config

    # Fields from both parents (ModelX and WTFMixin) survive the MRO
    for field in "abcd":
        assert getattr(model, field) == field

    print(model.__dict__.keys())
    print(model.__dict__)
    assert "private_config" in model.__dict__


test_config_mixin_3_multi_inheritance_not_override_config()
dict_keys(['private_context', 'private_config', 'private_llm', 'a', 'b', 'c', 'd'])
{'private_context': None, 'private_config': Config(extra_fields=None, project_path='', project_name='', inc=False, reqa_file='', max_auto_summarize_code=0, git_reinit=False, llm=LLMConfig(extra_fields=None, api_key='mock_api_key', api_type=<LLMType.OPENAI: 'openai'>, base_url='mock_base_url', api_version=None, model=None, pricing_plan=None, access_key=None, secret_key=None, session_token=None, endpoint=None, app_id='mock_app_id', api_secret='mock_api_secret', domain='mock_domain', max_token=4096, temperature=0.0, top_p=1.0, top_k=0, repetition_penalty=1.0, stop=None, presence_penalty=0.0, frequency_penalty=0.0, best_of=None, n=None, stream=True, seed=None, logprobs=None, top_logprobs=None, timeout=600, context_length=None, region_name=None, proxy=None, calc_usage=True, use_system_prompt=True), embedding=EmbeddingConfig(extra_fields=None, api_type=None, api_key=None, base_url=None, api_version=None, model=None, embed_batch_size=None, dimensions=None), omniparse=OmniParseConfig(extra_fields=None, api_key='', base_url=''), proxy='', search=SearchConfig(extra_fields=None, api_type=<SearchEngineType.DUCK_DUCK_GO: 'ddg'>, api_key='', cse_id='', search_func=None, params={'engine': 'google', 'google_domain': 'google.com', 'gl': 'us', 'hl': 'en'}), browser=BrowserConfig(extra_fields=None, engine=<WebBrowserEngineType.PLAYWRIGHT: 'playwright'>, browser_type='chromium'), mermaid=MermaidConfig(extra_fields=None, engine='nodejs', path='mmdc', puppeteer_config='', pyppeteer_path='/usr/bin/google-chrome-stable'), s3=None, redis=None, repair_llm_output=False, prompt_schema='json', workspace=WorkspaceConfig(extra_fields=None, path=WindowsPath('d:/llm/metagpt/workspace'), use_uid=False, uid=''), enable_longterm_memory=False, code_review_k_times=2, agentops_api_key='', metagpt_tti_url='', language='English', redis_key='placeholder', iflytek_app_id='', iflytek_api_secret='', iflytek_api_key='', azure_tts_subscription_key='', azure_tts_region=''), 'private_llm': 
<metagpt.provider.openai_api.OpenAILLM object at 0x00000128F0753910>, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
3. 多继承重写
# Zhipu-flavored mock config with a distinct model name, used to verify
# that set_config(..., override=True) really replaces the existing config.
mock_llm_config_zhipu = LLMConfig(
    llm_type="zhipu",
    api_key="mock_api_key.zhipu",
    base_url="mock_base_url",
    model="mock_zhipu_model",
    proxy="http://localhost:8080",
)
def test_config_mixin_4_multi_inheritance_override_config():
    """Test config mixin with multiple inheritance"""
    original_cfg = Config(llm=mock_llm_config)
    zhipu_cfg = Config(llm=mock_llm_config_zhipu)
    model = ModelY(config=original_cfg)
    assert model.config == original_cfg
    assert model.config.llm == mock_llm_config

    model.set_config(zhipu_cfg, override=True)
    # override=True replaces the private config in place
    assert model.config == zhipu_cfg
    assert model.config.llm == mock_llm_config_zhipu

    # Inherited fields from both parents are intact after the override
    for field in "abcd":
        assert getattr(model, field) == field

    print(model.__dict__.keys())
    assert "private_config" in model.__dict__
    assert model.config.llm.model == "mock_zhipu_model"


test_config_mixin_4_multi_inheritance_override_config()
dict_keys(['private_context', 'private_config', 'private_llm', 'a', 'b', 'c', 'd'])
4. 配置优先级
from pathlib import Path
import pytest
from metagpt.actions import Action
from metagpt.config2 import Config
from metagpt.const import CONFIG_ROOT
from metagpt.environment import Environment
from metagpt.roles import Role
from metagpt.team import Team
@pytest.mark.asyncio
async def test_config_priority():
"""If action's config is set, then its llm will be set, otherwise, it will use the role's llm"""
home_dir = Path.home() / CONFIG_ROOT
gpt4t = Config.from_home("gpt-4-turbo.yaml")
if not home_dir.exists():
assert gpt4t is None
gpt35 = Config.default()
gpt35.llm.model = "gpt35"
gpt4 = Config.default()
gpt4.llm.model = "gpt-4-0613"
a1 = Action(name="Say", instruction="Say your opinion with emotion and don't repeat it", config=gpt4t)
a2 = Action(name="Say", instruction="Say your opinion with emotion and don't repeat it")
a3 = Action(name="Vote", instruction="Vote for the candidate, and say why you vote for him/her")
# it will not work for a1 because the config is already set
A = Role(name="A", profile="Democratic candidate", goal="Win the election", actions=[a1], watch=[a2], config=gpt4)
# it will work for a2 because the config is not set
B = Role(name="B", profile="Republican candidate", goal="Win the election", actions=[a2], watch=[a1], config=gpt4)
# ditto
C = Role(name="C", profile="Voter", goal="Vote for the candidate", actions=[a3], watch=[a1, a2], config=gpt35)
env = Environment(desc="US election live broadcast")
Team(investment=10.0, env=env, roles=[A, B, C])
assert a1.llm.model == "gpt-4-turbo" if Path(home_dir / "gpt-4-turbo.yaml").exists() else "gpt-4-0613"
assert a2.llm.model == "gpt-4-0613"
assert a3.llm.model == "gpt35"
await test_config_priority()
如果有任何问题,欢迎在评论区提问。