2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_128k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 128]
abbr_suffixs = ['128k']
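The same one-line change repeats in each of the `ruler_*_gen.py` files below: the hardcoded tokenizer name becomes an environment lookup with `'gpt-4'` as the fallback. A minimal sketch of the shared shape of these configs, using the 128k variant's values; the dataset-assembly loop at the end is illustrative, not OpenCompass's actual code:

```python
import os

# Shared shape of the ruler_*_gen.py configs touched here.
NUM_SAMPLES = 100                                             # samples per task
tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')  # the changed line
max_seq_lens = [1024 * 128]                                   # context lengths under test
abbr_suffixs = ['128k']                                       # matching name suffixes

# Illustrative only: pair each context length with its suffix, roughly how
# the downstream config derives one dataset variant per length.
ruler_variants = [
    {'abbr': f'ruler_{suffix}', 'max_seq_len': seq_len, 'num_samples': NUM_SAMPLES}
    for seq_len, suffix in zip(max_seq_lens, abbr_suffixs)
]
print(ruler_variants)
```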
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_16k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 16]
abbr_suffixs = ['16k']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_1m_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 1024]
abbr_suffixs = ['1m']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_256k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 256]
abbr_suffixs = ['256k']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_32k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 32]
abbr_suffixs = ['32k']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_4k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 4]
abbr_suffixs = ['4k']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_512k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 512]
abbr_suffixs = ['512k']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_64k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 64]
abbr_suffixs = ['64k']
2 changes: 1 addition & 1 deletion opencompass/configs/datasets/ruler/ruler_8k_gen.py
@@ -14,7 +14,7 @@

# Evaluation config
NUM_SAMPLES = 100 # Change to the number of samples you need
- tokenizer_model = 'gpt-4'
+ tokenizer_model = os.environ.get('TOKENIZER_MODEL', 'gpt-4')
# Change the context lengths to be tested
max_seq_lens = [1024 * 8]
abbr_suffixs = ['8k']
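Because every config reads `TOKENIZER_MODEL` at import time, the tokenizer can be swapped across all nine context lengths without editing any of these files. A hedged usage sketch; the model name is an assumed example, not something this diff prescribes:

```python
import os

# Set the variable before the ruler configs are imported; otherwise each
# config falls back to 'gpt-4'. 'gpt-4o' is an assumed example name.
os.environ['TOKENIZER_MODEL'] = 'gpt-4o'

# Equivalent one-off override from a shell (comment, not executed here):
#   TOKENIZER_MODEL=gpt-4o python run.py <your-config>
```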