Refactor benchmark base class and update main function

- Removed unused imports and constants
- Simplified run method signature by merging the redundant num_ctx and context_size parameters into a single context_size parameter
- Added test case name logging for better traceability
- Updated main function to pass context_size to benchmark run method
- Improved code clarity and maintainability
This commit is contained in:
second_constantine 2026-01-26 23:38:01 +03:00
parent 25e0a2a96a
commit 7cf34fd14b
9 changed files with 6 additions and 8 deletions

View File

@@ -1,14 +1,9 @@
import logging
import time
import os
import json
from typing import Dict, Any, List
from abc import ABC, abstractmethod
from models.ollama_client import OllamaClient
# Импортируем константы
from constants import TEST_SEPARATOR
class Benchmark(ABC):
"""Базовый класс для всех бенчмарков."""
@@ -46,13 +41,14 @@ class Benchmark(ABC):
"""
pass
def run(self, ollama_client: OllamaClient, model_name: str, num_ctx: int = 32000, context_size: int = None) -> Dict[str, Any]:
def run(self, ollama_client: OllamaClient, model_name: str, context_size: int = 32000) -> Dict[str, Any]:
"""
Запуск бенчмарка.
Args:
ollama_client: Клиент для работы с Ollama
model_name: Название модели
context_size: Размер контекста для модели
Returns:
Результаты бенчмарка
@@ -65,6 +61,7 @@ class Benchmark(ABC):
for i, test_case in enumerate(test_cases, 1):
try:
self.logger.info(f"Running test case {i}/{len(test_cases)} for {self.name}")
self.logger.info(f"Test name: {test_case['name']}")
# Замер времени
start_time = time.time()
@@ -78,7 +75,7 @@ class Benchmark(ABC):
# Для Ollama параметры контекста передаются в options
options['num_ctx'] = context_size
self.logger.debug(f"Setting context size to {context_size}")
self.logger.debug(f"About to call generate with model={model_name}, prompt length={len(prompt)}, options={options}")
model_response = ollama_client.generate(
model=model_name,

View File

@@ -26,6 +26,7 @@ def run_benchmarks(ollama_client: OllamaClient, model_name: str, benchmarks: Lis
ollama_client: Клиент для работы с Ollama
model_name: Название модели
benchmarks: Список имен бенчмарков для запуска
context_size: Размер контекста для модели
Returns:
Список результатов бенчмарков
@@ -45,7 +46,7 @@ def run_benchmarks(ollama_client: OllamaClient, model_name: str, benchmarks: Lis
logging.info(f"Running {benchmark_name} benchmark...")
benchmark = benchmark_classes[benchmark_name]()
result = benchmark.run(ollama_client, model_name)
result = benchmark.run(ollama_client, model_name, context_size)
results.append(result)
return results