Quickstart
Start using OASIS for social simulations in under 5 minutes
Set up your environment
Learn how to set up OASIS and run your first social simulation.
Installation
You can install OASIS in two ways.

Option 1: Install from PyPI:
pip install camel-oasis

Option 2: Install from source:
git clone https://github.com/camel-ai/oasis.git
cd oasis
pip install --upgrade pip setuptools
pip install -e .  # This installs the dependencies specified in pyproject.toml
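To confirm the installation succeeded, try importing the package (a minimal check, nothing more than an import test):

import oasis

# If this runs without an ImportError, OASIS is installed correctly.
print("OASIS imported:", oasis.__name__)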
Running simulations
OASIS supports different types of LLM backends for running simulations. Choose the option that works best for your needs.
Using OpenAI API
Add your OpenAI API key to your environment variables:
For Bash (Linux, macOS, Git Bash on Windows):
export OPENAI_API_KEY=<insert your OpenAI API key>
export OPENAI_API_BASE_URL=<insert your OpenAI API BASE URL> # Optional: for proxy services
For Windows Command Prompt:
set OPENAI_API_KEY=<insert your OpenAI API key>
set OPENAI_API_BASE_URL=<insert your OpenAI API BASE URL> # Optional: for proxy services
For Windows PowerShell:
$env:OPENAI_API_KEY="<insert your OpenAI API key>"
$env:OPENAI_API_BASE_URL="<insert your OpenAI API BASE URL>" # Optional: for proxy services
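Before running a simulation, you can confirm the key is actually visible to Python (a minimal sketch; it only checks that the variable is set, not that the key is valid):

import os

# The simulation process reads the key from the environment at runtime.
assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY is not set"
print("OPENAI_API_KEY found.")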
If you installed OASIS with pip, download the example user profile file user_data_36.json and place it in your own ./data/reddit/ directory, so that the path ./data/reddit/user_data_36.json exists.
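You can check that the profile file is where the simulation expects it before running the script (a minimal helper; it creates the directory but does not download the file for you):

import os

profile_path = "./data/reddit/user_data_36.json"
# Create the directory if needed; the JSON file itself must be placed there manually.
os.makedirs(os.path.dirname(profile_path), exist_ok=True)
print("Profile file present:", os.path.exists(profile_path))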
Execute the Reddit simulation script:
import asyncio
import os

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

import oasis
from oasis import ActionType, EnvAction, SingleAction


async def main():
    # Define the model for the agents
    openai_model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O_MINI,
    )

    # Define the available actions for the agents
    available_actions = [
        ActionType.LIKE_POST,
        ActionType.DISLIKE_POST,
        ActionType.CREATE_POST,
        ActionType.CREATE_COMMENT,
        ActionType.LIKE_COMMENT,
        ActionType.DISLIKE_COMMENT,
    ]

    # Define the path to the database
    db_path = "./data/reddit_simulation.db"

    # Delete the old database
    if os.path.exists(db_path):
        os.remove(db_path)

    # Make the environment
    env = oasis.make(
        platform=oasis.DefaultPlatformType.REDDIT,
        database_path=db_path,
        agent_profile_path="./data/reddit/user_data_36.json",
        agent_models=openai_model,
        available_actions=available_actions,
    )

    # Run the environment
    await env.reset()

    action_1 = SingleAction(agent_id=0,
                            action=ActionType.CREATE_POST,
                            args={"content": "Hello, world!"})
    action_2 = SingleAction(agent_id=0,
                            action=ActionType.CREATE_COMMENT,
                            args={
                                "post_id": "1",
                                "content": "Welcome to the OASIS World!"
                            })
    env_actions = EnvAction(activate_agents=list(range(10)),
                            intervention=[action_1, action_2])

    # Perform the actions
    await env.step(env_actions)

    # Close the environment
    await env.close()

    # Print the results
    # print_db_contents(db_path)


if __name__ == "__main__":
    asyncio.run(main())
This will start a simulation of user interactions in a Reddit-like environment.
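The results are written to the SQLite database at ./data/reddit_simulation.db. One quick way to see what was generated is to list the tables with Python's built-in sqlite3 module (the table names and schemas are defined by OASIS, so inspect them rather than assuming them):

import sqlite3

# Open the database produced by the simulation and list its tables.
conn = sqlite3.connect("./data/reddit_simulation.db")
tables = conn.execute(
    "SELECT name FROM sqlite_master WHERE type='table'").fetchall()
print("Tables:", [name for (name,) in tables])
conn.close()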
Using local open-source models with vLLM
- Install vLLM by following the instructions in the vLLM repository.
- Download a model (e.g., Qwen2.5) to your local machine:
pip install huggingface_hub
huggingface-cli download "Qwen/Qwen2.5-7B-Instruct" \
  --local-dir "YOUR_LOCAL_MODEL_DIRECTORY" \
  --local-dir-use-symlinks False \
  --resume-download \
  --token "YOUR_HUGGING_FACE_TOKEN"
- Deploy the vLLM API server:
vllm serve /path/to/Qwen2.5-7B-Instruct --host 0.0.0.0 --port 8000 \
--served-model-name 'qwen-2' \
--enable-auto-tool-choice \
--tool-call-parser hermes
- Test whether vLLM is deployed correctly by querying the models endpoint (replace $ip and $port with your server's host and port; a Python-level check through CAMEL is also sketched after the full simulation script below):
curl http://$ip:$port/v1/models
- Edit or write the scripts/environment/twitter_simulation.py file to use your vLLM deployment:
vllm_model_1 = ModelFactory.create(
    model_platform=ModelPlatformType.VLLM,
    model_type="qwen-2",
    url="http://$ip:$port",
)
vllm_model_2 = ModelFactory.create(
    model_platform=ModelPlatformType.VLLM,
    model_type="qwen-2",
    url="http://$ip:$port",
)
models = [vllm_model_1, vllm_model_2]
- Prepare the user profiles. If you installed OASIS with pip, download the False_Business_0.csv user profile file and place it in your own ./data/twitter_dataset/anonymous_topic_200_1h/ directory.
- Run the Twitter simulation:
import asyncio
import os

from camel.models import ModelFactory
from camel.types import ModelPlatformType

import oasis
from oasis import ActionType, EnvAction, SingleAction


async def main():
    # Define the models for agents. Agents will select models based on
    # pre-defined scheduling strategies
    vllm_model_1 = ModelFactory.create(
        model_platform=ModelPlatformType.VLLM,
        model_type="qwen-2",
        url="http://$ip:$port",
    )
    vllm_model_2 = ModelFactory.create(
        model_platform=ModelPlatformType.VLLM,
        model_type="qwen-2",
        url="http://$ip:$port",
    )
    models = [vllm_model_1, vllm_model_2]

    # Define the available actions for the agents
    available_actions = [
        ActionType.CREATE_POST,
        ActionType.LIKE_POST,
        ActionType.REPOST,
        ActionType.FOLLOW,
        ActionType.DO_NOTHING,
        ActionType.QUOTE_POST,
    ]

    # Define the path to the database
    db_path = "./data/twitter_simulation.db"

    # Delete the old database
    if os.path.exists(db_path):
        os.remove(db_path)

    # Make the environment
    env = oasis.make(
        platform=oasis.DefaultPlatformType.TWITTER,
        database_path=db_path,
        agent_profile_path=("data/twitter_dataset/anonymous_topic_200_1h/"
                            "False_Business_0.csv"),
        agent_models=models,
        available_actions=available_actions,
    )

    # Run the environment
    await env.reset()

    action_1 = SingleAction(agent_id=0,
                            action=ActionType.CREATE_POST,
                            args={"content": "Earth is flat."})
    env_actions_1 = EnvAction(
        # Activate 5 agents with ids 1, 3, 5, 7, 9
        activate_agents=[1, 3, 5, 7, 9],
        intervention=[action_1])

    action_2 = SingleAction(agent_id=1,
                            action=ActionType.CREATE_POST,
                            args={"content": "Earth is not flat."})
    env_actions_2 = EnvAction(activate_agents=[2, 4, 6, 8, 10],
                              intervention=[action_2])

    empty_action = EnvAction()  # Means activate all agents and no intervention

    all_env_actions = [
        env_actions_1,
        env_actions_2,
        empty_action,
    ]

    # Simulate 3 timesteps
    for i in range(3):
        env_actions = all_env_actions[i]
        # Perform the actions
        await env.step(env_actions)

    # Close the environment
    await env.close()


if __name__ == "__main__":
    asyncio.run(main())
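If the agents do not respond during the simulation, you can sanity-check the vLLM endpoint through CAMEL before debugging OASIS itself. This is a minimal sketch, assuming a recent CAMEL version whose ChatAgent accepts plain strings in step(); replace $ip and $port with your server's host and port:

from camel.agents import ChatAgent
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Create the same model backend the simulation uses.
vllm_model = ModelFactory.create(
    model_platform=ModelPlatformType.VLLM,
    model_type="qwen-2",
    url="http://$ip:$port",
)

# Ask a single question and print the reply; a sensible answer means the
# vLLM server and CAMEL are wired up correctly.
agent = ChatAgent(system_message="You are a helpful assistant.",
                  model=vllm_model)
response = agent.step("Reply with one short sentence to confirm you are online.")
print(response.msgs[0].content)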