Dataset columns:
code: string, 141 to 97.3k characters (Python source that uses llama_index)
apis: list, 1 to 24 items (fully qualified llama_index API names found in the source)
extract_api: string, 113 to 214k characters (extracted call-site tuples for those APIs)
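The extract_api values are printable Python literals. Their exact field layout is not documented in this dump; the sketch below assumes the eight-element tuple shape visible in the rows (call span, dotted API name, callable name, an (args, kwargs) pair, the call source, the name span, a boolean flag, and the originating import statement) and shows how a single value could be decoded with ast.literal_eval. The raw string is a shortened copy of the first row's value.

import ast

# Shortened copy of one extract_api value from the first row below; real values
# hold a full list of such tuples.
raw = r'''[((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import TypeVar')]'''

for call in ast.literal_eval(raw):
    # Assumed layout: span, dotted name, callable name, (args, kwargs), call text,
    # name span, a boolean flag, and the import that brings the name into scope.
    span, dotted_name, func_name, (args, kwargs), call_text, name_span, flag, import_stmt = call
    print(dotted_name, span, call_text.strip(), import_stmt, sep=" | ")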
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComp...
[ "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query_p...
[((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2032, 2068), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriev...
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComp...
[ "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query_p...
[((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2032, 2068), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriev...
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComp...
[ "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query_p...
[((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2032, 2068), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriev...
from typing import Any, Callable, Optional, Sequence from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_completion_callback from lla...
[ "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.core.llms.types.CompletionResponse" ]
[((1537, 1562), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1560, 1562), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((1876, 1901), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_dataset("MiniCovidQaDataset", "./data") ...
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents" ]
[((265, 319), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniCovidQaDataset"""', '"""./data"""'], {}), "('MiniCovidQaDataset', './data')\n", (287, 319), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((364, 416), 'llama_index.core.VectorStore...
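The row above is truncated right after the dataset download. A minimal sketch of how such a script typically continues, assuming the common RagEvaluatorPack pattern from the llama_index dataset examples; the pack name, download directory, and the arun call are assumptions, not part of the row.

import asyncio

from llama_index.core import VectorStoreIndex
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack


async def main():
    # Download the labelled RAG dataset together with its source documents.
    rag_dataset, documents = download_llama_dataset("MiniCovidQaDataset", "./data")

    # Build a baseline index and query engine over the source documents.
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # Assumed continuation: fetch the RagEvaluatorPack and benchmark the query engine.
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
    rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
    benchmark_df = await rag_evaluator.arun()
    print(benchmark_df)


if __name__ == "__main__":
    asyncio.run(main())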
"""Palm API.""" import os from typing import Any, Callable, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( Ch...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr" ]
[((708, 779), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PALM_MODEL', 'description': '"""The PaLM model to use."""'}), "(default=DEFAULT_PALM_MODEL, description='The PaLM model to use.')\n", (713, 779), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((...
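Most of the LLM wrapper rows in this dump declare their configuration with pydantic Field descriptors like the ones extracted above. A simplified, standalone stand-in for that pattern; the DEFAULT_PALM_MODEL value and the class itself are illustrative assumptions, not the library's actual definitions.

from llama_index.legacy.bridge.pydantic import BaseModel, Field
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS

DEFAULT_PALM_MODEL = "models/text-bison-001"  # assumed default, for illustration only


class PalmSettings(BaseModel):
    """Simplified stand-in for the Field declarations listed in the row above."""

    model_name: str = Field(
        default=DEFAULT_PALM_MODEL, description="The PaLM model to use."
    )
    num_output: int = Field(
        default=DEFAULT_NUM_OUTPUTS, description="The number of tokens to generate."
    )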
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, Comp...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.llms.generic_utils.get_from_p...
[((827, 870), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The AI21 model to use."""'}), "(description='The AI21 model to use.')\n", (832, 870), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((892, 954), 'llama_index.legacy.bridge.pydantic.Field', 'Field...
import json from typing import Any, Callable, Dict, List, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatR...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.vllm_utils.post_http_request", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.generic_utils.stream_com...
[((1015, 1065), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The HuggingFace Model to use."""'}), "(description='The HuggingFace Model to use.')\n", (1020, 1065), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1092, 1149), 'llama_index.legacy.bridge.pyd...
"""Prompts.""" from abc import ABC, abstractmethod from copy import deepcopy from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, ) from llama_index.core.bridge.pydantic import Field if TYPE_CHECKING: from llama_index.core.bridge.lan...
[ "llama_index.core.base.llms.generic_utils.prompt_to_messages", "llama_index.llms.langchain.utils.from_lc_messages", "llama_index.core.base.llms.types.ChatMessage.from_str", "llama_index.core.bridge.langchain.ConditionalPromptSelector", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query...
[((1473, 1559), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Template variable mappings (Optional)."""'}), "(default_factory=dict, description=\n 'Template variable mappings (Optional).')\n", (1478, 1559), False, 'from llama_index.core.bridge.pydantic import ...
"""Prompts.""" from abc import ABC, abstractmethod from copy import deepcopy from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, ) from llama_index.core.bridge.pydantic import Field if TYPE_CHECKING: from llama_index.core.bridge.lan...
[ "llama_index.core.base.llms.generic_utils.prompt_to_messages", "llama_index.llms.langchain.utils.from_lc_messages", "llama_index.core.base.llms.types.ChatMessage.from_str", "llama_index.core.bridge.langchain.ConditionalPromptSelector", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query...
[((1473, 1559), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Template variable mappings (Optional)."""'}), "(default_factory=dict, description=\n 'Template variable mappings (Optional).')\n", (1478, 1559), False, 'from llama_index.core.bridge.pydantic import ...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex from llama_index.llms import OpenAI async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_data...
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents", "llama_index.llms.OpenAI" ]
[((301, 371), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""Uber10KDataset2021"""', '"""./uber10k_2021_dataset"""'], {}), "('Uber10KDataset2021', './uber10k_2021_dataset')\n", (323, 371), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((430, 482...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.llms import OpenAI, Gemini from llama_index.core import ServiceContext import pandas as pd ...
[ "llama_index.llms.Gemini", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.llms.OpenAI", "llama_index.core.evaluation.PairwiseComparisonEvaluator" ]
[((402, 475), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MtBenchHumanJudgementDataset"""', '"""./mt_bench_data"""'], {}), "('MtBenchHumanJudgementDataset', './mt_bench_data')\n", (424, 475), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((16...
from abc import abstractmethod from typing import ( Any, Sequence, ) from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, ) from llama_...
[ "llama_index.core.callbacks.CallbackManager", "llama_index.core.bridge.pydantic.validator", "llama_index.core.bridge.pydantic.Field" ]
[((669, 721), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (674, 721), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((800, 839), 'llama_index.core.bridge.pydantic.v...
from abc import abstractmethod from typing import Any, List, Sequence, Union from llama_index.core.base.query_pipeline.query import ( ChainableMixin, QueryComponent, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType from llama_index...
[ "llama_index.core.query_pipeline.components.router.SelectorComponent", "llama_index.core.schema.QueryBundle", "llama_index.core.tools.types.ToolMetadata" ]
[((3300, 3332), 'llama_index.core.query_pipeline.components.router.SelectorComponent', 'SelectorComponent', ([], {'selector': 'self'}), '(selector=self)\n', (3317, 3332), False, 'from llama_index.core.query_pipeline.components.router import SelectorComponent\n'), ((1653, 1685), 'llama_index.core.tools.types.ToolMetadat...
"""Base agent type.""" import uuid from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.legacy.bridge.pydantic import BaseModel, Field from llama_index.legacy.callbacks import CallbackManager, trace_method from llama_index.legacy.chat_engine.types import ( BaseChatEngine, ...
[ "llama_index.legacy.callbacks.trace_method", "llama_index.legacy.bridge.pydantic.Field" ]
[((1310, 1331), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1322, 1331), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((1633, 1654), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query...
import json from typing import Any, Dict, Sequence, Tuple import httpx from httpx import Timeout from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse,...
[ "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field" ]
[((816, 911), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""http://localhost:11434"""', 'description': '"""Base url the model is hosted under."""'}), "(default='http://localhost:11434', description=\n 'Base url the model is hosted under.')\n", (821, 911), False, 'from llama_index.legacy.b...
import warnings from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatRes...
[ "llama_index.legacy.llms.cohere_utils.messages_to_cohere_history", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.cohere_utils.cohere_modelname_to_contextsize", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.leg...
[((897, 942), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The cohere model to use."""'}), "(description='The cohere model to use.')\n", (902, 942), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((968, 1025), 'llama_index.legacy.bridge.pydantic.Field', '...
from abc import abstractmethod from typing import List from llama_index.core.indices.query.schema import QueryBundle, QueryType from llama_index.core.prompts.mixin import PromptMixin from llama_index.core.schema import NodeWithScore class BaseImageRetriever(PromptMixin): """Base Image Retriever Abstraction.""" ...
[ "llama_index.core.indices.query.schema.QueryBundle" ]
[((706, 748), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'str_or_query_bundle'}), '(query_str=str_or_query_bundle)\n', (717, 748), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((1525, 1582), 'llama_index.core.indices.query.schema.Query...
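The retrieval methods in the row above accept either a plain string or a QueryBundle, and the extract_api column shows the wrapping call. A minimal sketch of that normalization, using only the imports visible in the row; the helper name and the sample query are illustrative.

from llama_index.core.indices.query.schema import QueryBundle, QueryType


def to_query_bundle(str_or_query_bundle: QueryType) -> QueryBundle:
    # Wrap a raw query string into a QueryBundle; pass an existing bundle through.
    if isinstance(str_or_query_bundle, str):
        return QueryBundle(query_str=str_or_query_bundle)
    return str_or_query_bundle


bundle = to_query_bundle("show me images of espresso machines")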
import json from abc import abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, Optional, Type if TYPE_CHECKING: from llama_index.legacy.bridge.langchain import StructuredTool, Tool from deprecated import deprecated from llama_index.legacy.bridge.pydantic import BaseModel...
[ "llama_index.legacy.bridge.langchain.Tool.from_function", "llama_index.legacy.bridge.langchain.StructuredTool.from_function" ]
[((1586, 1675), 'deprecated.deprecated', 'deprecated', (['"""Deprecated in favor of `to_openai_tool`, which should be used instead."""'], {}), "(\n 'Deprecated in favor of `to_openai_tool`, which should be used instead.')\n", (1596, 1675), False, 'from deprecated import deprecated\n'), ((1400, 1422), 'json.dumps', '...
import logging from dataclasses import dataclass from typing import Any, List, Optional, cast from deprecated import deprecated import llama_index.core from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.callbacks.base import CallbackManager from llama_index.core.base.embeddings.base import B...
[ "llama_index.core.embeddings.utils.resolve_embed_model", "llama_index.core.node_parser.loading.load_parser", "llama_index.core.extractors.loading.load_extractor", "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.core.s...
[((1138, 1165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1155, 1165), False, 'import logging\n'), ((1940, 1997), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_met...
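One call the row above extracts is PromptHelper.from_llm_metadata. A minimal sketch of that construction; the context_window and num_output values are illustrative defaults, not those of any particular LLM.

from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.indices.prompt_helper import PromptHelper

# Illustrative metadata; a real ServiceContext takes this from the configured LLM.
llm_metadata = LLMMetadata(context_window=4096, num_output=256)
prompt_helper = PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)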
from typing import List, Optional import fsspec from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.schema import BaseNode from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc from llama_index.core.storage.kvstore import ( SimpleKVStore as SimpleCache, ) from...
[ "llama_index.core.storage.docstore.utils.json_to_doc", "llama_index.core.storage.docstore.utils.doc_to_json", "llama_index.core.storage.kvstore.SimpleKVStore.from_persist_path", "llama_index.core.bridge.pydantic.Field" ]
[((577, 655), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CACHE_NAME', 'description': '"""Collection name of the cache."""'}), "(default=DEFAULT_CACHE_NAME, description='Collection name of the cache.')\n", (582, 655), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field...
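The cache row above combines SimpleKVStore with the doc_to_json/json_to_doc helpers. A minimal sketch of that round trip; the collection name and key are illustrative, not those used by the real class.

from llama_index.core.schema import TextNode
from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc
from llama_index.core.storage.kvstore import SimpleKVStore as SimpleCache

cache = SimpleCache()
node = TextNode(text="hello world")

# Serialize the node to JSON, store it under a key, then rebuild it on the way out.
cache.put("node-key", doc_to_json(node), collection="llama_cache")
restored = json_to_doc(cache.get("node-key", collection="llama_cache"))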
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.core.base_retriever import BaseRetriever from...
[ "llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMappin...
[((925, 938), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (932, 938), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2060, 2096), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retri...
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.core.base_retriever import BaseRetriever from...
[ "llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMappin...
[((925, 938), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (932, 938), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2060, 2096), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retri...
"""Base reader class.""" from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterable, List if TYPE_CHECKING: from llama_index.legacy.bridge.langchain import Document as LCDocument from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.schema import BaseComponent, Document cla...
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1225, 1327), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1230, 1327), False, 'from llam...
"""Base reader class.""" from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterable, List if TYPE_CHECKING: from llama_index.legacy.bridge.langchain import Document as LCDocument from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.schema import BaseComponent, Document cla...
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1225, 1327), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1230, 1327), False, 'from llam...
""" Portkey integration with Llama_index for enhanced monitoring. """ from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatRes...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.portkey_utils.is_chat_model", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llm...
[((1284, 1347), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The mode for using the Portkey integration"""'}), "(description='The mode for using the Portkey integration')\n", (1289, 1347), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1390, 1426), 'lla...
""" Portkey integration with Llama_index for enhanced monitoring. """ from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatRes...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.portkey_utils.is_chat_model", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llm...
[((1284, 1347), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The mode for using the Portkey integration"""'}), "(description='The mode for using the Portkey integration')\n", (1289, 1347), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1390, 1426), 'lla...
"""Query plan tool.""" from typing import Any, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.response_synthesizers import ( BaseSynthesizer, get_response_synthesizer, ) from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core....
[ "llama_index.core.tools.types.ToolMetadata", "llama_index.core.utils.print_text", "llama_index.core.response_synthesizers.get_response_synthesizer", "llama_index.core.bridge.pydantic.Field", "llama_index.core.schema.TextNode", "llama_index.core.schema.NodeWithScore" ]
[((1418, 1465), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""ID of the query node."""'}), "(..., description='ID of the query node.')\n", (1423, 1465), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1487, 1535), 'llama_index.core.bridge.pydantic.Field', ...
"""Query plan tool.""" from typing import Any, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.response_synthesizers import ( BaseSynthesizer, get_response_synthesizer, ) from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core....
[ "llama_index.core.tools.types.ToolMetadata", "llama_index.core.utils.print_text", "llama_index.core.response_synthesizers.get_response_synthesizer", "llama_index.core.bridge.pydantic.Field", "llama_index.core.schema.TextNode", "llama_index.core.schema.NodeWithScore" ]
[((1418, 1465), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""ID of the query node."""'}), "(..., description='ID of the query node.')\n", (1423, 1465), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1487, 1535), 'llama_index.core.bridge.pydantic.Field', ...
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, Co...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error", "...
[((968, 1006), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Model to use."""'}), "(description='The Model to use.')\n", (973, 1006), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1033, 1095), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([]...
from typing import Any, Awaitable, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_TEMPERATURE from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResp...
[ "llama_index.legacy.llms.generic_utils.astream_completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator", "llama_index.legacy.llms.generic_utils.acompletion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "lla...
[((1378, 1535), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_LITELLM_MODEL', 'description': '"""The LiteLLM model to use. For complete list of providers https://docs.litellm.ai/docs/providers"""'}), "(default=DEFAULT_LITELLM_MODEL, description=\n 'The LiteLLM model to use. For compl...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_dataset("MiniTruthfulQADataset", "./data...
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents" ]
[((265, 322), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniTruthfulQADataset"""', '"""./data"""'], {}), "('MiniTruthfulQADataset', './data')\n", (287, 322), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((367, 419), 'llama_index.core.Vecto...
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_TEMPERATURE # from mistralai.models.chat_completion import ChatMessage from llama_index...
[ "llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator", "llama_index.legacy.llms.base.llm_completion_callback", "...
[((1271, 1357), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MISTRALAI_MODEL', 'description': '"""The mistralai model to use."""'}), "(default=DEFAULT_MISTRALAI_MODEL, description=\n 'The mistralai model to use.')\n", (1276, 1357), False, 'from llama_index.legacy.bridge.pydantic imp...
"""Base index classes.""" import logging from abc import ABC, abstractmethod from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast from llama_index.legacy.chat_engine.types import BaseChatEngine, ChatMode from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_i...
[ "llama_index.legacy.agent.AgentRunner.from_llm", "llama_index.legacy.ingestion.run_transformations", "llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args", "llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults", "llama_index.legacy.storage.storage_context.Sto...
[((793, 825), 'typing.TypeVar', 'TypeVar', (['"""IS"""'], {'bound': 'IndexStruct'}), "('IS', bound=IndexStruct)\n", (800, 825), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((838, 877), 'typing.TypeVar', 'TypeVar', (['"""IndexType"""'], {'bound': '"""BaseIndex"""'}),...
"""Base index classes.""" import logging from abc import ABC, abstractmethod from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast from llama_index.legacy.chat_engine.types import BaseChatEngine, ChatMode from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_i...
[ "llama_index.legacy.agent.AgentRunner.from_llm", "llama_index.legacy.ingestion.run_transformations", "llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args", "llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults", "llama_index.legacy.storage.storage_context.Sto...
[((793, 825), 'typing.TypeVar', 'TypeVar', (['"""IS"""'], {'bound': 'IndexStruct'}), "('IS', bound=IndexStruct)\n", (800, 825), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((838, 877), 'typing.TypeVar', 'TypeVar', (['"""IndexType"""'], {'bound': '"""BaseIndex"""'}),...
import json from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import ( DEFAULT_TEMPERATURE, ) from llama_index.legacy.core.llms.types import ( Ch...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response", "llama_index.legacy....
[((1084, 1145), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The modelId of the Bedrock model to use."""'}), "(description='The modelId of the Bedrock model to use.')\n", (1089, 1145), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1171, 1228), 'llama_i...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_dataset( "BraintrustCodaHelpDesk...
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents" ]
[((265, 344), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""BraintrustCodaHelpDeskDataset"""', '"""./braintrust_codahdd"""'], {}), "('BraintrustCodaHelpDeskDataset', './braintrust_codahdd')\n", (287, 344), False, 'from llama_index.core.llama_dataset import download_llama_datas...
from typing import Any, Dict, Optional from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.constants import ( DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE, ) from llama_index.legacy.core.llms.types import LLMMetadata from llama_index.legacy.llms.generic_utils import get_from_param_or_env f...
[ "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.generic_utils.get_from_param_or_env" ]
[((548, 659), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."""'}), "(description=\n 'The Neutrino router to use. See https://docs.neutrinoapp.com/router for details.'\n )\n", (553, 659), False, 'from l...
"""Tree-based index.""" from enum import Enum from typing import Any, Dict, Optional, Sequence, Union from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.embeddings.base import BaseEmbedding # from llama_index.core.data_structs.data_structs import IndexGraph from llama_index.cor...
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever", "llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever", "llama_index.core.indices.tree.inserter.TreeIndexInserter", "llama_index.core.indices.tree.select_le...
[((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}),...
"""Tree-based index.""" from enum import Enum from typing import Any, Dict, Optional, Sequence, Union from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.embeddings.base import BaseEmbedding # from llama_index.core.data_structs.data_structs import IndexGraph from llama_index.cor...
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever", "llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever", "llama_index.core.indices.tree.inserter.TreeIndexInserter", "llama_index.core.indices.tree.select_le...
[((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}),...
"""Tree-based index.""" from enum import Enum from typing import Any, Dict, Optional, Sequence, Union from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.embeddings.base import BaseEmbedding # from llama_index.core.data_structs.data_structs import IndexGraph from llama_index.cor...
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever", "llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever", "llama_index.core.indices.tree.inserter.TreeIndexInserter", "llama_index.core.indices.tree.select_le...
[((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}),...
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, Co...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.ChatMessage",...
[((762, 827), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': 'f"""Full URL of the model. e.g. `{EXAMPLE_URL}`"""'}), "(description=f'Full URL of the model. e.g. `{EXAMPLE_URL}`')\n", (767, 827), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((880, 918), 'llama...
"""PII postprocessor.""" import json from copy import deepcopy from typing import Callable, Dict, List, Optional, Tuple from llama_index.core.llms.llm import LLM from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.prompts.base import PromptTemplate from llama_index.core.schema ...
[ "llama_index.core.prompts.base.PromptTemplate", "llama_index.core.schema.NodeWithScore" ]
[((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, ...
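The PII row above builds a PromptTemplate from its template string and later parses the LLM output with json.loads. A minimal sketch of the template half; the template text and sample input are illustrative, not the class's actual pii_str_tmpl.

from llama_index.core.prompts.base import PromptTemplate

pii_tmpl = PromptTemplate(
    "Mask all personally identifiable information in the text below and "
    "return the masked text:\n{context_str}"
)
prompt = pii_tmpl.format(context_str="John Doe lives at 42 Main St.")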
"""PII postprocessor.""" import json from copy import deepcopy from typing import Callable, Dict, List, Optional, Tuple from llama_index.core.llms.llm import LLM from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.prompts.base import PromptTemplate from llama_index.core.schema ...
[ "llama_index.core.prompts.base.PromptTemplate", "llama_index.core.schema.NodeWithScore" ]
[((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, ...
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE from llama_index.legacy.core.llms.types import ChatMessage, LLMMetadata from llama_index.legacy.llms.everlyai_utils impor...
[ "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.llms.generic_utils.get_from_param_or_env", "llama_index.legacy.llms.everlyai_utils.everlyai_modelname_to_contextsize" ]
[((1525, 1586), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""EverlyAI_API_KEY"""'], {}), "('api_key', api_key, 'EverlyAI_API_KEY')\n", (1546, 1586), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n'), ((1486, 1...
"""txtai reader.""" from typing import Any, Dict, List import numpy as np from llama_index.legacy.readers.base import BaseReader from llama_index.legacy.schema import Document class TxtaiReader(BaseReader): """txtai reader. Retrieves documents through an existing in-memory txtai index. These documents...
[ "llama_index.legacy.schema.Document" ]
[((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_...
"""txtai reader.""" from typing import Any, Dict, List import numpy as np from llama_index.legacy.readers.base import BaseReader from llama_index.legacy.schema import Document class TxtaiReader(BaseReader): """txtai reader. Retrieves documents through an existing in-memory txtai index. These documents...
[ "llama_index.legacy.schema.Document" ]
[((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_...
from llama_index.core.prompts.base import PromptTemplate from llama_index.core.prompts.prompt_type import PromptType """Single select prompt. PromptTemplate to select one out of `num_choices` options provided in `context_list`, given a query `query_str`. Required template variables: `num_chunks`, `context_list`, `qu...
[ "llama_index.core.prompts.base.PromptTemplate" ]
[((1156, 1257), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_SINGLE_SELECT_PROMPT_TMPL', 'prompt_type': 'PromptType.SINGLE_SELECT'}), '(template=DEFAULT_SINGLE_SELECT_PROMPT_TMPL, prompt_type=\n PromptType.SINGLE_SELECT)\n', (1170, 1257), False, 'from llama_index.core....
"""Awadb reader.""" from typing import Any, List import numpy as np from llama_index.legacy.readers.base import BaseReader from llama_index.legacy.schema import Document class AwadbReader(BaseReader): """Awadb reader. Retrieves documents through an existing awadb client. These documents ...
[ "llama_index.legacy.schema.Document" ]
[((1780, 1824), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': "item_detail['embedding_text']"}), "(text=item_detail['embedding_text'])\n", (1788, 1824), False, 'from llama_index.legacy.schema import Document\n'), ((2042, 2061), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), ...
"""Mongo client.""" from typing import Dict, Iterable, List, Optional, Union from llama_index.legacy.readers.base import BaseReader from llama_index.legacy.schema import Document class SimpleMongoReader(BaseReader): """Simple mongo reader. Concatenates each Mongo doc into Document used by LlamaIndex. ...
[ "llama_index.legacy.schema.Document" ]
[((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy....
"""Mongo client.""" from typing import Dict, Iterable, List, Optional, Union from llama_index.legacy.readers.base import BaseReader from llama_index.legacy.schema import Document class SimpleMongoReader(BaseReader): """Simple mongo reader. Concatenates each Mongo doc into Document used by LlamaIndex. ...
[ "llama_index.legacy.schema.Document" ]
[((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy....
from typing import Any, Callable, Optional, Sequence from typing_extensions import override from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types im...
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr" ]
[((660, 673), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (671, 673), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((687, 700), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (698, 700), False, 'from llama_index...
from typing import Dict, Type from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.embeddings.mock_embed_model import MockEmbedding RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = { MockEmbedding.class_name(): MockEmbedding, } # conditionals for llama-cloud support try: ...
[ "llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name", "llama_index.embeddings.huggingface.HuggingFaceInferenceAPIEmbedding.class_name", "llama_index.embeddings.openai.OpenAIEmbedding.class_name", "llama_index.embeddings.azure_openai.AzureOpenAIEmbedding.class_name" ]
[((229, 255), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name', 'MockEmbedding.class_name', ([], {}), '()\n', (253, 255), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((431, 459), 'llama_index.embeddings.openai.OpenAIEmbedding.class_name', 'OpenAIEmbedding.c...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core.evaluation import CorrectnessEvaluator from llama_index.llms import OpenAI, Gemini from llama_index.core import ServiceContext import pandas as pd async d...
[ "llama_index.llms.Gemini", "llama_index.core.evaluation.CorrectnessEvaluator", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.llms.OpenAI" ]
[((386, 471), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniMtBenchSingleGradingDataset"""', '"""./mini_mt_bench_data"""'], {}), "('MiniMtBenchSingleGradingDataset',\n './mini_mt_bench_data')\n", (408, 471), False, 'from llama_index.core.llama_dataset import download_ll...
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_dataset( "PaulGrahamEssayDataset...
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents" ]
[((265, 330), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""PaulGrahamEssayDataset"""', '"""./paul_graham"""'], {}), "('PaulGrahamEssayDataset', './paul_graham')\n", (287, 330), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((389, 441), 'llama_...
from typing import TYPE_CHECKING, Any, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine if TYPE_CHECKING: from llama_index.core.langchain_helpers.agents.tools import ( LlamaIndexTool, ) from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput DEFAUL...
[ "llama_index.core.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config", "llama_index.core.langchain_helpers.agents.tools.IndexToolConfig", "llama_index.core.tools.types.ToolMetadata" ]
[((1402, 1450), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description'}), '(name=name, description=description)\n', (1414, 1450), False, 'from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput\n'), ((3560, 3675), 'llama_index.core.langch...
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( ChatMessage, ChatRe...
[ "llama_index.legacy.llms.openai_utils.from_openai_message_dict", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.Priva...
[((868, 916), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The llama-api model to use."""'}), "(description='The llama-api model to use.')\n", (873, 916), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((942, 999), 'llama_index.legacy.bridge.pydantic.Fiel...
"""Download tool from Llama Hub.""" from typing import Optional, Type from llama_index.legacy.download.module import ( LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download, ) from llama_index.legacy.tools.tool_spec.base import BaseToolSpec def download_tool( tool_class: str, lla...
[ "llama_index.legacy.download.module.download_llama_module", "llama_index.legacy.download.module.track_download" ]
[((867, 1047), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['tool_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_dir': '"""tools"""', 'custom_path': 'custom_path', 'library_path': '"""tools/library.json"""'}), "(tool_class, llama_hub_url=l...
"""Simple Engine.""" import json import os from typing import Any, Optional, Union from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core.callbacks.base import CallbackManager from llama_index.core.embeddings import BaseEmbedding from llama_index.core.embeddings.mock_embed_model im...
[ "llama_index.core.node_parser.SentenceSplitter", "llama_index.core.embeddings.mock_embed_model.MockEmbedding", "llama_index.core.response_synthesizers.get_response_synthesizer", "llama_index.core.ingestion.pipeline.run_transformations", "llama_index.core.SimpleDirectoryReader", "llama_index.core.schema.Qu...
[((7145, 7220), 'llama_index.core.ingestion.pipeline.run_transformations', 'run_transformations', (['documents'], {'transformations': 'self.index._transformations'}), '(documents, transformations=self.index._transformations)\n', (7164, 7220), False, 'from llama_index.core.ingestion.pipeline import run_transformations\n...
import os from typing import Optional, Dict import openai import pandas as pd import llama_index from llama_index.llms.openai import OpenAI from llama_index.readers.schema.base import Document from llama_index.readers import SimpleWebPageReader from llama_index.prompts import PromptTemplate from llama_index import Se...
[ "llama_index.OpenAIEmbedding", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.prompts.PromptTemplate", "llama_index.readers.schema.base.Document", "llama_index.readers.SimpleWebPageReader", "llama_index.load_index_from_storage", "llama_index.llms.o...
[((9647, 9699), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (9675, 9699), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((9770, 9843), 'llama_index.load_index_from_storage', ...
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without res...
[ "llama_index.llms.base.LLMMetadata", "llama_index.bridge.pydantic.Field", "llama_index.llms.base.llm_completion_callback", "llama_index.bridge.pydantic.PrivateAttr", "llama_index.llms.base.llm_chat_callback", "llama_index.llms.generic_utils.completion_response_to_chat_response" ]
[((2151, 2199), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The path to the trt engine."""'}), "(description='The path to the trt engine.')\n", (2156, 2199), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2239, 2296), 'llama_index.bridge.pydantic.Field', 'Field', ([...
from llama_index.core.callbacks.schema import CBEventType, EventPayload from llama_index.core.llms import ChatMessage, ChatResponse from llama_index.core.schema import NodeWithScore, TextNode import chainlit as cl @cl.on_chat_start async def start(): await cl.Message(content="LlamaIndexCb").send() cb = cl.L...
[ "llama_index.core.schema.TextNode", "llama_index.core.llms.ChatMessage" ]
[((316, 346), 'chainlit.LlamaIndexCallbackHandler', 'cl.LlamaIndexCallbackHandler', ([], {}), '()\n', (344, 346), True, 'import chainlit as cl\n'), ((415, 428), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], {}), '(0.2)\n', (423, 428), True, 'import chainlit as cl\n'), ((691, 704), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], ...
import requests from bs4 import BeautifulSoup from llama_index import GPTSimpleVectorIndex from llama_index.readers.database import DatabaseReader from env import settings from logger import logger from .base import BaseToolSet, SessionGetter, ToolScope, tool class RequestsGet(BaseToolSet): @tool( name=...
[ "llama_index.readers.database.DatabaseReader", "llama_index.GPTSimpleVectorIndex" ]
[((713, 732), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {}), '(html)\n', (726, 732), False, 'from bs4 import BeautifulSoup\n'), ((1073, 1166), 'logger.logger.debug', 'logger.debug', (['f"""\nProcessed RequestsGet, Input Url: {url} Output Contents: {content}"""'], {}), '(\n f"""\nProcessed RequestsGet, Input U...
try: from llama_index import Document from llama_index.text_splitter import SentenceSplitter except ImportError: from llama_index.core import Document from llama_index.core.text_splitter import SentenceSplitter def llama_index_sentence_splitter( documents: list[str], document_ids: list[str], chunk...
[ "llama_index.core.text_splitter.SentenceSplitter", "llama_index.core.Document" ]
[((432, 500), 'llama_index.core.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (448, 500), False, 'from llama_index.core.text_splitter import SentenceSplitter\n'), ((514, 532), 'llama_in...
""" Creates RAG dataset for tutorial notebooks and persists to disk. """ import argparse import logging import sys from typing import List, Optional import llama_index import numpy as np import pandas as pd from gcsfs import GCSFileSystem from llama_index import ServiceContext, StorageContext, load_index_from_storage...
[ "llama_index.callbacks.OpenInferenceCallbackHandler", "llama_index.StorageContext.from_defaults", "llama_index.llms.OpenAI", "llama_index.load_index_from_storage", "llama_index.callbacks.CallbackManager", "llama_index.embeddings.openai.OpenAIEmbedding" ]
[((1235, 1270), 'numpy.array', 'np.array', (['first_document_relevances'], {}), '(first_document_relevances)\n', (1243, 1270), True, 'import numpy as np\n'), ((1310, 1346), 'numpy.array', 'np.array', (['second_document_relevances'], {}), '(second_document_relevances)\n', (1318, 1346), True, 'import numpy as np\n'), ((1...
import logging import os import time import typing import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional import numpy as np from llama_index.core.schema import BaseNode, MetadataMode, TextNode from llama_index.core.vector_stores.types import ( VectorStore, VectorStoreQuery, VectorSto...
[ "llama_index.core.vector_stores.types.VectorStoreQueryResult", "llama_index.core.vector_stores.utils.node_to_metadata_dict", "llama_index.core.vector_stores.utils.metadata_dict_to_node", "llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node" ]
[((524, 551), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (541, 551), False, 'import logging\n'), ((8582, 8767), 'vearch.GammaVectorInfo', 'vearch.GammaVectorInfo', ([], {'name': '"""text_embedding"""', 'type': 'vearch.dataType.VECTOR', 'is_index': '(True)', 'dimension': 'dim', 'model_...
# ENTER YOUR OPENAPI KEY IN OPENAI_API_KEY ENV VAR FIRST from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader savePath = f'/{os.path.dirname(__file__)}/indexes/index.json' # # index = GPTSimpleVectorIndex(documents)#, llm_predictor=llm_predictor) index = GPTSimpleVectorIn...
[ "llama_index.GPTSimpleVectorIndex.load_from_disk" ]
[((303, 348), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['savePath'], {}), '(savePath)\n', (338, 348), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader\n')]
from typing import Optional, Union from llama_index import ServiceContext from llama_index.callbacks import CallbackManager from llama_index.embeddings.utils import EmbedType from llama_index.extractors import ( EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleE...
[ "llama_index.extractors.TitleExtractor", "llama_index.extractors.QuestionsAnsweredExtractor", "llama_index.ServiceContext.from_defaults", "llama_index.prompts.PromptTemplate", "llama_index.extractors.SummaryExtractor", "llama_index.extractors.EntityExtractor", "llama_index.extractors.KeywordExtractor", ...
[((3952, 4020), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3968, 4020), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((4643, 4954), 'llama_index....
from rag.agents.interface import Pipeline from llama_index.core.program import LLMTextCompletionProgram import json from llama_index.llms.ollama import Ollama from typing import List from pydantic import create_model from rich.progress import Progress, SpinnerColumn, TextColumn import requests import warnings import bo...
[ "llama_index.core.program.LLMTextCompletionProgram.from_defaults", "llama_index.llms.ollama.Ollama" ]
[((396, 458), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (419, 458), False, 'import warnings\n'), ((459, 514), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarnin...
import asyncio import chromadb import os from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.embeddings.huggingface import HuggingFaceEmbedding from traceloop.sdk import Traceloop os.environ["TOKENIZERS_P...
[ "llama_index.core.StorageContext.from_defaults", "llama_index.embeddings.huggingface.HuggingFaceEmbedding", "llama_index.core.VectorStoreIndex.from_documents", "llama_index.vector_stores.chroma.ChromaVectorStore", "llama_index.core.SimpleDirectoryReader" ]
[((344, 390), 'traceloop.sdk.Traceloop.init', 'Traceloop.init', ([], {'app_name': '"""llama_index_example"""'}), "(app_name='llama_index_example')\n", (358, 390), False, 'from traceloop.sdk import Traceloop\n'), ((408, 434), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (432, 434), False, 'i...
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ================================================== # # This file is a part of PYGPT package # # Website: https://pygpt.net # # GitHub: https://github.com/szczyglis-dev/py-gpt # # MIT License ...
[ "llama_index.core.StorageContext.from_defaults", "llama_index.core.load_index_from_storage" ]
[((3275, 3321), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'path'}), '(persist_dir=path)\n', (3303, 3321), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((3384, 3457), 'llama_index.core.load_index_from_storage', 'load_index_f...
import streamlit as st from sqlalchemy import create_engine, inspect, text from typing import Dict, Any from llama_index import ( VectorStoreIndex, ServiceContext, download_loader, ) from llama_index.llama_pack.base import BaseLlamaPack from llama_index.llms import OpenAI import openai import os import pan...
[ "llama_index.llms.OpenAI", "llama_index.ServiceContext.from_defaults", "llama_index.indices.struct_store.NLSQLTableQueryEngine", "llama_index.SQLDatabase" ]
[((1194, 1309), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""{self.page}"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=f'{self.page}', layout='centered',\n initial_sidebar_state='auto', menu_items=None)\n", (1212, 1309), Tr...