feat: import order

feature/stdout-breaker
adldotori 1 year ago
parent 8df3214024
commit 9eefef4019

@@ -1,36 +1,29 @@
-from typing import Dict, List, TypedDict
 import re
-import uvicorn
+from typing import Dict, List, TypedDict
+
 import torch
+import uvicorn
 from fastapi import FastAPI
 from fastapi.staticfiles import StaticFiles
 from pydantic import BaseModel
-from env import settings
-from core.prompts.error import ERROR_PROMPT
+
 from core.agents.manager import AgentManager
+from core.handlers.base import BaseHandler, FileHandler, FileType
+from core.handlers.dataframe import CsvToDataframe
+from core.handlers.image import ImageCaptioning
+from core.prompts.error import ERROR_PROMPT
 from core.tools.base import BaseToolSet
-from core.tools.terminal import Terminal
+from core.tools.cpu import ExitConversation, RequestsGet, WineDB
 from core.tools.editor import CodeEditor
-from core.tools.cpu import (
-    RequestsGet,
-    WineDB,
-    ExitConversation,
-)
 from core.tools.gpu import (
     ImageEditing,
     InstructPix2Pix,
     Text2Image,
     VisualQuestionAnswering,
 )
-from core.handlers.base import BaseHandler, FileHandler, FileType
-from core.handlers.image import ImageCaptioning
-from core.handlers.dataframe import CsvToDataframe
+from core.tools.terminal import Terminal
 from core.upload import StaticUploader
+from env import settings
 from logger import logger
 
 app = FastAPI()

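Note: the new ordering in this and every hunk below matches isort's default convention: standard library, then third-party, then first-party/local imports, with groups separated by a blank line, straight imports ahead of from-imports within each group, and names inside a from-import alphabetized. A minimal sketch of reproducing the ordering programmatically, assuming the isort package is installed:

    import isort

    messy = (
        "from typing import Dict, List, TypedDict\n"
        "import re\n"
        "import uvicorn\n"
        "import torch\n"
        "from fastapi import FastAPI\n"
    )
    # isort.code() returns the source regrouped: stdlib first (import re,
    # then the typing from-import), then third-party (torch, uvicorn, then
    # fastapi) -- the same shape as the hunks in this commit.
    print(isort.code(messy))

Running isort over the repo would produce the same result as these hand-ordered hunks.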
@@ -1,14 +1,13 @@
 from langchain.chat_models.base import BaseChatModel
 from langchain.output_parsers.base import BaseOutputParser
-from env import settings
 from core.prompts.input import EVAL_PREFIX, EVAL_SUFFIX
 from core.tools.base import BaseToolSet
 from core.tools.factory import ToolsFactory
-from .llm import ChatOpenAI
+from env import settings
 from .chat_agent import ConversationalChatAgent
+from .llm import ChatOpenAI
 from .parser import EvalOutputParser

@@ -3,8 +3,8 @@ from typing import Any, Dict, List, Optional, Union
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.schema import AgentAction, AgentFinish, LLMResult
-from logger import logger
 from ansi import ANSI, Color, Style
+from logger import logger
 
 class EVALCallbackHandler(BaseCallbackHandler):

@@ -5,15 +5,6 @@ import logging
 import sys
 from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
-from pydantic import BaseModel, Extra, Field, root_validator
-from tenacity import (
-    before_sleep_log,
-    retry,
-    retry_if_exception_type,
-    stop_after_attempt,
-    wait_exponential,
-)
 from langchain.chat_models.base import BaseChatModel
 from langchain.schema import (
     AIMessage,
@@ -25,6 +16,14 @@ from langchain.schema import (
     SystemMessage,
 )
 from langchain.utils import get_from_dict_or_env
+from pydantic import BaseModel, Extra, Field, root_validator
+from tenacity import (
+    before_sleep_log,
+    retry,
+    retry_if_exception_type,
+    stop_after_attempt,
+    wait_exponential,
+)
 
 from logger import logger
@@ -128,7 +127,7 @@ class ChatOpenAI(BaseChatModel, BaseModel):
     """Whether to stream the results or not."""
     n: int = 1
     """Number of chat completions to generate for each prompt."""
-    max_tokens: int = 256
+    max_tokens: int = 2048
     """Maximum number of tokens to generate."""
 
     class Config:
@@ -226,7 +225,6 @@ class ChatOpenAI(BaseChatModel, BaseModel):
     def _generate(
         self, messages: List[BaseMessage], stop: Optional[List[str]] = None
     ) -> ChatResult:
-
         message_dicts, params = self._create_message_dicts(messages, stop)
         logger.debug("Messages:\n")
         for item in message_dicts:

@ -1,18 +1,17 @@
from typing import Dict from typing import Dict
from langchain.agents.tools import BaseTool
from langchain.agents.agent import Agent, AgentExecutor from langchain.agents.agent import Agent, AgentExecutor
from langchain.agents.tools import BaseTool
from langchain.callbacks import set_handler
from langchain.callbacks.base import CallbackManager
from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.chat_memory import BaseChatMemory
from langchain.callbacks.base import CallbackManager
from langchain.callbacks import set_handler
from core.tools.base import BaseToolSet from core.tools.base import BaseToolSet
from core.tools.factory import ToolsFactory from core.tools.factory import ToolsFactory
from .callback import EVALCallbackHandler
from .builder import AgentBuilder from .builder import AgentBuilder
from .callback import EVALCallbackHandler
callback_manager = CallbackManager([EVALCallbackHandler()]) callback_manager = CallbackManager([EVALCallbackHandler()])
set_handler(EVALCallbackHandler()) set_handler(EVALCallbackHandler())

@@ -1,8 +1,9 @@
 import os
 import uuid
-import requests
-from typing import Dict
 from enum import Enum
+from typing import Dict
+
+import requests
 
 class FileType(Enum):

@@ -1,9 +1,6 @@
 import torch
 from PIL import Image
-from transformers import (
-    BlipProcessor,
-    BlipForConditionalGeneration,
-)
+from transformers import BlipForConditionalGeneration, BlipProcessor
 
 from core.prompts.file import IMAGE_PROMPT

@@ -1,8 +1,8 @@
-from typing import Callable, Tuple
 from enum import Enum
+from typing import Callable, Tuple
-from langchain.agents.tools import Tool, BaseTool
 from langchain.agents.agent import AgentExecutor
+from langchain.agents.tools import BaseTool, Tool
 
 class ToolScope(Enum):

@@ -1,15 +1,13 @@
-from env import settings
 import requests
-from llama_index.readers.database import DatabaseReader
-from llama_index import GPTSimpleVectorIndex
 from bs4 import BeautifulSoup
+from llama_index import GPTSimpleVectorIndex
+from llama_index.readers.database import DatabaseReader
-from .base import tool, BaseToolSet, ToolScope, SessionGetter
+from env import settings
 from logger import logger
+from .base import BaseToolSet, SessionGetter, ToolScope, tool
 
 class RequestsGet(BaseToolSet):
     @tool(

@@ -1,4 +1,4 @@
-from core.tools.base import tool, BaseToolSet
+from core.tools.base import BaseToolSet, tool
 from logger import logger
 from .patch import CodePatcher

@@ -56,8 +56,12 @@ test.py|7,5|9,13|news_titles = []
 test.py|11,16|11,16|_titles
 """
+import os
+from pathlib import Path
 from typing import Tuple
+
+from env import settings
 
 class Position:
     separator = ","
@@ -76,7 +80,7 @@ class PatchCommand:
     separator = "|"
 
     def __init__(self, filepath: str, start: Position, end: Position, content: str):
-        self.filepath: str = filepath
+        self.filepath: str = str(Path(settings["PLAYGROUND_DIR"]) / Path(filepath))
         self.start: Position = start
         self.end: Position = end
         self.content: str = content
@@ -92,6 +96,13 @@ class PatchCommand:
         return sum([len(line) for line in lines])
 
     def execute(self) -> Tuple[int, int]:
+        # make sure the directory exists
+        if not str(Path(self.filepath).resolve()).startswith(
+            str(Path(settings["PLAYGROUND_DIR"]).resolve())
+        ):
+            return "You can't write file outside of current directory."
+        os.makedirs(os.path.dirname(self.filepath), exist_ok=True)
+
         lines = self.read_lines()
         before = sum([len(line) for line in lines])

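The guard added to PatchCommand.execute above pins writes under settings["PLAYGROUND_DIR"] by comparing resolved path prefixes with startswith. A minimal standalone sketch of the same containment check, using a hypothetical sandbox directory; note that a bare prefix test can be fooled by a sibling directory such as playground-evil, which Path.is_relative_to (Python 3.9+) sidesteps:

    from pathlib import Path

    PLAYGROUND_DIR = "playground"  # hypothetical stand-in for settings["PLAYGROUND_DIR"]

    def is_inside_playground(filepath: str) -> bool:
        root = Path(PLAYGROUND_DIR).resolve()
        target = (root / filepath).resolve()  # resolve() collapses any ".." segments
        return target.is_relative_to(root)

    print(is_inside_playground("notes/todo.txt"))  # True
    print(is_inside_playground("../secrets.env"))  # False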
@@ -3,7 +3,7 @@ read protocol:
 <filepath>|<start line>-<end line>
 """
-from typing import Optional, List, Tuple
+from typing import List, Optional, Tuple
 
 class Line:

@@ -6,6 +6,7 @@ write protocol:
 """
 import os
 from pathlib import Path
+
 from env import settings

@@ -1,4 +1,5 @@
 from typing import Optional
+
 from langchain.agents import load_tools
 from langchain.agents.tools import BaseTool
 from langchain.llms.base import BaseLLM

@@ -1,29 +1,26 @@
 import os
 import uuid
 import numpy as np
 import torch
-from PIL import Image
-from transformers import (
-    CLIPSegProcessor,
-    CLIPSegForImageSegmentation,
-)
-from transformers import (
-    BlipProcessor,
-    BlipForQuestionAnswering,
-)
 from diffusers import (
-    StableDiffusionPipeline,
+    EulerAncestralDiscreteScheduler,
     StableDiffusionInpaintPipeline,
     StableDiffusionInstructPix2PixPipeline,
+    StableDiffusionPipeline,
+)
+from PIL import Image
+from transformers import (
+    BlipForQuestionAnswering,
+    BlipProcessor,
+    CLIPSegForImageSegmentation,
+    CLIPSegProcessor,
 )
-from diffusers import EulerAncestralDiscreteScheduler
-from utils import get_new_image_name
 from logger import logger
+from utils import get_new_image_name
-from .base import tool, BaseToolSet
+from .base import BaseToolSet, tool
 
 class MaskFormer(BaseToolSet):

@@ -1,12 +1,11 @@
 import subprocess
-from typing import Dict, List
 from tempfile import TemporaryFile
+from typing import Dict, List
+
+from core.tools.base import BaseToolSet, SessionGetter, ToolScope, tool
+from core.tools.terminal.syscall import SyscallTracer
 from env import settings
 from logger import logger
-from core.tools.base import tool, BaseToolSet, ToolScope, SessionGetter
-from core.tools.terminal.syscall import SyscallTracer
 
 class Terminal(BaseToolSet):
@@ -17,9 +16,8 @@ class Terminal(BaseToolSet):
         name="Terminal",
         description="Executes commands in a terminal."
         "If linux errno occurs, we have to solve the problem with the terminal. "
-        "It can't execute interactive operations or blocking operations. "
-        "Input should be valid commands, "
-        "and the output will be any output from running that command.",
+        "Input must be one valid command. "
+        "Output will be any output from running that command.",
         scope=ToolScope.SESSION,
     )
     def execute(self, commands: str, get_session: SessionGetter) -> str:
@@ -45,9 +43,6 @@ class Terminal(BaseToolSet):
         except Exception as e:
             output = str(e)
 
-        if len(output) > 1000:
-            output = output[:1000] + "..."
-
         logger.debug(
             f"\nProcessed Terminal, Input Commands: {commands} "
             f"Output Answer: {output}"

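The Terminal tool's contract above is now one valid command in, raw output out, and the 1000-character truncation on output is gone. A hedged sketch of that contract with plain subprocess (a hypothetical helper, not the repo's SyscallTracer-based implementation):

    import subprocess

    def run_one_command(command: str, timeout: int = 60) -> str:
        # One command per call; stdout and stderr are merged and returned
        # untruncated, matching the removed 1000-character cap.
        try:
            result = subprocess.run(
                command,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                timeout=timeout,
            )
            return result.stdout
        except subprocess.TimeoutExpired:
            return f"command timed out after {timeout}s"

    print(run_one_command("echo hello"))  # -> hello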
@@ -1,16 +1,16 @@
-from typing import Tuple, Optional
 import signal
+from typing import Optional, Tuple
 from ptrace.debugger import (
-    PtraceDebugger,
-    PtraceProcess,
-    ProcessExit,
-    ProcessSignal,
     NewProcessEvent,
     ProcessExecution,
+    ProcessExit,
+    ProcessSignal,
+    PtraceDebugger,
+    PtraceProcess,
 )
-from ptrace.syscall import PtraceSyscall
 from ptrace.func_call import FunctionCallOptions
+from ptrace.syscall import PtraceSyscall
 from ptrace.tools import signal_to_exitcode
@@ -61,7 +61,7 @@ class SyscallTracer:
                 break
             try:
-                self.wait_syscall_with_timeout(5)
+                self.wait_syscall_with_timeout(60)
             except ProcessExit as event:
                 if event.exitcode is not None:
                     exitcode = event.exitcode

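wait_syscall_with_timeout's budget grows from 5 to 60 seconds, so traced commands that legitimately block for a while (package installs, model downloads) are not killed early. A hedged sketch of how such a timeout can be built on SIGALRM, which the module's signal import suggests; the repo's actual implementation may differ:

    import signal

    class SyscallTimeout(Exception):
        pass

    def wait_with_timeout(seconds: int, wait_fn):
        # Schedule SIGALRM to interrupt wait_fn if it blocks too long.
        def handler(signum, frame):
            raise SyscallTimeout(f"no syscall event within {seconds}s")

        previous = signal.signal(signal.SIGALRM, handler)
        signal.alarm(seconds)
        try:
            return wait_fn()
        finally:
            signal.alarm(0)  # cancel any pending alarm
            signal.signal(signal.SIGALRM, previous)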
@@ -1,7 +1,9 @@
 import os
+
 import boto3
+
 from env import DotEnv
 from .base import AbstractUploader

@@ -2,6 +2,7 @@ import os
 import shutil
+
 from env import DotEnv
 from .base import AbstractUploader

@@ -10,7 +10,7 @@ services:
     volumes:
       - ./static/:/app/static/
     ports:
-      - "3000:3000"
+      - "4500:4500"
       - "7000:7000"
       - "8000:8000" # eval port
     env_file:
@@ -26,7 +26,7 @@ services:
       - ../.cache/huggingface/:/root/.cache/huggingface/
       - ./static/:/app/static/
     ports:
-      - "3000:3000"
+      - "4500:4500"
       - "7000:7000"
       - "8000:8000" # eval port
     env_file:

@@ -1,4 +1,5 @@
 import logging
+
 from env import settings
 
 logger = logging.getLogger()

@@ -1,9 +1,9 @@
 import os
 import random
-import torch
 import uuid
-import numpy as np
+
+import numpy as np
+import torch
 
 os.makedirs("image", exist_ok=True)
 os.makedirs("audio", exist_ok=True)
