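"""Asynchronous scraper for the Infostart forum (https://forum.infostart.ru/group2).

Collects Dev-tagged topics, extracts the opening question and the messages from
the "Найденные решения" (found solutions) section, and appends them as
prompt/solution rows to a CSV dataset.
"""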
import asyncio
import csv
import logging
import os
import re
import time
from typing import Dict, List, Optional, Set
from urllib.parse import urljoin, urlparse

import aiohttp
from bs4 import BeautifulSoup, NavigableString

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class InfostartForumParser:
    def __init__(self, csv_file: str = 'forum_dataset.csv', delay: float = 1.0):
        self.csv_file = csv_file
        self.delay = delay
        self.base_url = 'https://forum.infostart.ru/group2'
        self.processed_urls: Set[str] = set()

        # Create the CSV file with a header row if it does not exist yet
        self._init_csv()
        # Resume support: remember which topics are already in the CSV
        self._load_processed_urls()

    def _init_csv(self):
        """Create the CSV file with a header row if it does not exist yet."""
        if not os.path.exists(self.csv_file):
            with open(self.csv_file, 'w', newline='', encoding='utf-8') as file:
                writer = csv.writer(file, quoting=csv.QUOTE_ALL)
                writer.writerow(['source', 'in_source_id', 'prompt', 'gold_standard_solution'])

    def _load_processed_urls(self):
        """Load the IDs of already-processed topics from the CSV (resume support)."""
        if os.path.exists(self.csv_file):
            with open(self.csv_file, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    if row['in_source_id']:
                        self.processed_urls.add(row['in_source_id'])
        logger.info(f"Loaded {len(self.processed_urls)} already-processed topic IDs")

    async def fetch_page(self, session: aiohttp.ClientSession, url: str) -> Optional[str]:
        """Fetch a page and return its HTML, or None on failure."""
        try:
            # Be polite to the server: wait between requests
            await asyncio.sleep(self.delay)
            async with session.get(url, timeout=aiohttp.ClientTimeout(total=30)) as response:
                if response.status == 200:
                    return await response.text()
                else:
                    logger.warning(f"HTTP {response.status} while fetching {url}")
                    return None
        except Exception as e:
            logger.error(f"Error fetching {url}: {e}")
            return None

    async def parse_topic_list_page(self, session: aiohttp.ClientSession, page_num: int) -> List[str]:
        """Parse one page of the topic list and return URLs of Dev-tagged topics."""
        url = f"{self.base_url}/?a=26694&PAGEN_1={page_num}"
        logger.info(f"Parsing topic list page {page_num}")

        html = await self.fetch_page(session, url)
        if not html:
            return []

        soup = BeautifulSoup(html, 'html.parser')
        topic_urls = []

        # The topic list lives inside div.forum-topic-list
        forum_topic_list = soup.find('div', class_='forum-topic-list')
        if not forum_topic_list:
            logger.warning(f"div.forum-topic-list not found on page {page_num}")
            return []

        # Each topic row is a div.nff-font110
        topic_divs = forum_topic_list.find_all('div', class_='nff-font110')
        logger.info(f"Found {len(topic_divs)} topics on page {page_num}")

        for topic_div in topic_divs:
            # Keep only topics tagged with the 'Dev' label
            dev_link = topic_div.find('a', string='Dev')
            if dev_link:
                # The topic link looks like /forum<N>/topic<M>/
                topic_link = topic_div.find('a', href=re.compile(r'/forum\d+/topic\d+/'))
                if topic_link:
                    topic_url = urljoin(self.base_url, topic_link['href'])
                    topic_urls.append(topic_url)

        logger.info(f"Found {len(topic_urls)} Dev-tagged topics on page {page_num}")
        return topic_urls

    def extract_topic_id(self, url: str) -> Optional[str]:
        """Extract the topic ID (e.g. 'topic123456') from a topic URL."""
        match = re.search(r'/forum\d+/(topic\d+)/', url)
        return match.group(1) if match else None

    def extract_meta_identifier(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the identifier from the <meta itemprop="identifier"> tag."""
        meta_tag = soup.find('meta', attrs={'itemprop': 'identifier'})
        return meta_tag.get('content') if meta_tag else None

    def extract_first_message(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the first message of the topic (the question)."""
        # All messages of the topic live inside div.post-mesages
        post_messages = soup.find('div', class_='post-mesages')
        if not post_messages:
            return None

        # The first message is the first div.m-tree-p in the message tree
        first_message_div = post_messages.find('div', class_='m-tree-p')
        if not first_message_div:
            return None

        message_text_div = first_message_div.find('div', class_='forum-message-text')
        if not message_text_div:
            return None

        return self.clean_message_text(message_text_div)

    def extract_solutions(self, soup: BeautifulSoup) -> List[str]:
        """Extract answers from the 'Найденные решения' (found solutions) section."""
        solutions = []

        # Locate the 'Найденные решения' header text
        found_solutions_header = soup.find(string=re.compile(r'Найденные решения'))
        if not found_solutions_header:
            return solutions

        # The solutions block is the sibling that follows the header's parent
        solutions_section = found_solutions_header.find_parent().find_next_sibling()
        if not solutions_section:
            return solutions

        # Each solution is a message node (div.m-tree-p)
        solution_divs = solutions_section.find_all('div', class_='m-tree-p')

        for solution_div in solution_divs:
            message_text_div = solution_div.find('div', class_='forum-message-text')
            if message_text_div:
                solution_text = self.clean_message_text(message_text_div)
                if solution_text:
                    # Drop the leading reply marker such as '(3)'
                    solution_text = self.clean_solution_text(solution_text)
                    if solution_text:
                        solutions.append(solution_text)

        return solutions

    def clean_message_text(self, message_div) -> str:
        """Clean up and format the message text."""
        # Work on a copy so the original soup tree stays intact
        message_copy = message_div.__copy__()

        # Convert code blocks (<div class="code"><pre>...</pre></div>) into fenced 1C code
        code_blocks = message_copy.find_all('div', class_='code')
        for code_block in code_blocks:
            pre_tag = code_block.find('pre')
            if pre_tag:
                code_text = self.extract_code_from_pre(pre_tag)
                replacement_text = f"\n```1c\n{code_text}\n```\n"
                code_block.replace_with(NavigableString(replacement_text))

        # Preserve line breaks
        for br in message_copy.find_all('br'):
            br.replace_with('\n')

        # Drop quoted replies
        for quote in message_copy.find_all('div', class_='quote-wrap'):
            quote.decompose()

        # Drop scripts and styles
        for element in message_copy.find_all(['script', 'style']):
            element.decompose()

        text = message_copy.get_text()

        # Collapse runs of blank lines into a single blank line
        text = re.sub(r'\n\s*\n\s*\n+', '\n\n', text)

        # Normalize whitespace line by line, keeping leading indentation
        lines = text.split('\n')
        cleaned_lines = []
        for line in lines:
            line = line.rstrip()
            if line.lstrip():
                leading_spaces = len(line) - len(line.lstrip())
                content = line.lstrip()
                # Collapse internal runs of spaces and tabs
                content = re.sub(r'[ \t]+', ' ', content)
                line = ' ' * leading_spaces + content
            cleaned_lines.append(line)

        text = '\n'.join(cleaned_lines).strip()

        return text

    def extract_code_from_pre(self, pre_tag) -> str:
        """Extract code from a <pre> tag while preserving line breaks."""
        # Some posts nest a <pre> inside another <pre>; use the inner one
        inner_pre = pre_tag.find('pre')
        if inner_pre:
            pre_tag = inner_pre

        # Work on a copy so the original tree stays intact
        pre_copy = pre_tag.__copy__()

        # Replace <br> with newlines and unwrap <font> tags recursively
        self.process_code_elements(pre_copy)

        text = pre_copy.get_text()

        return self.clean_1c_code(text)

    def process_code_elements(self, element):
        """Recursively process code elements so that line breaks survive get_text()."""
        children_to_process = list(element.children)
        for child in children_to_process:
            if hasattr(child, 'name') and child.name:
                if child.name == 'font':
                    # Syntax-highlighting <font> tags: keep only their text
                    child.replace_with(child.get_text())
                elif child.name == 'br':
                    # Turn explicit line breaks into newlines
                    child.replace_with('\n')
                else:
                    # Recurse into other tags
                    self.process_code_elements(child)

    def clean_1c_code(self, code_text: str) -> str:
        """Clean up 1C code: unescape entities, normalize line endings and indentation."""
        if not code_text:
            return ""

        # Unescape HTML entities that survive get_text()
        code_text = code_text.replace('&quot;', '"')
        code_text = code_text.replace('&lt;', '<')
        code_text = code_text.replace('&gt;', '>')
        code_text = code_text.replace('&amp;', '&')
        # Collapse doubled double-quotes
        code_text = code_text.replace('""', '"')

        # Normalize line endings
        code_text = code_text.replace('\r\n', '\n')
        code_text = code_text.replace('\r', '\n')

        # Strip trailing whitespace from every line
        lines = []
        for line in code_text.split('\n'):
            line = line.rstrip()
            lines.append(line)

        # Drop leading and trailing blank lines
        while lines and not lines[0].strip():
            lines.pop(0)
        while lines and not lines[-1].strip():
            lines.pop()

        # Replace the leading whitespace of each non-empty line with a uniform prefix
        if lines:
            normalized_lines = []
            for line in lines:
                if line.strip():
                    normalized_lines.append(' ' + line.lstrip())
                else:
                    normalized_lines.append('')
            lines = normalized_lines

        return '\n'.join(lines)

    def clean_solution_text(self, text: str) -> str:
        """Strip leading digits in parentheses, e.g. '(3) ', from a solution text."""
        if not text:
            return text

        # Drop a leading '(N)' marker at the start of the message
        text = re.sub(r'^\(\d+\)\s*', '', text.strip())

        return text

    async def parse_topic(self, session: aiohttp.ClientSession, topic_url: str) -> Optional[List[Dict]]:
        """Parse a single forum topic into prompt/solution rows."""
        topic_id = self.extract_topic_id(topic_url)
        if not topic_id:
            logger.debug(f"Invalid topic URL: {topic_url}")
            return None

        logger.info(f"Parsing topic {topic_url}")

        html = await self.fetch_page(session, topic_url)
        if not html:
            return None

        soup = BeautifulSoup(html, 'html.parser')

        # The meta identifier is used as the stable topic ID in the dataset
        meta_id = self.extract_meta_identifier(soup)
        if not meta_id:
            logger.warning(f"No meta identifier found for {topic_url}")
            return None

        # Skip topics that are already in the CSV
        if meta_id in self.processed_urls:
            logger.debug(f"Topic {meta_id} already processed")
            return None

        # The first message is the question
        question = self.extract_first_message(soup)
        if not question:
            logger.warning(f"No question found for {topic_url}")
            return None

        # Messages from the 'Найденные решения' section are the answers
        solutions = self.extract_solutions(soup)

        # One CSV row per solution; topics without solutions still get one row
        results = []
        if solutions:
            for solution in solutions:
                results.append({
                    'source': 'forum_infostart',
                    'in_source_id': meta_id,
                    'prompt': question,
                    'gold_standard_solution': solution
                })
        else:
            results.append({
                'source': 'forum_infostart',
                'in_source_id': meta_id,
                'prompt': question,
                'gold_standard_solution': ''
            })

        self.processed_urls.add(meta_id)
        return results

    async def process_topics_batch(self, session: aiohttp.ClientSession, topic_urls: List[str]) -> List[Dict]:
        """Process a batch of topics concurrently."""
        tasks = [self.parse_topic(session, url) for url in topic_urls]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        valid_results = []
        for result in results:
            if isinstance(result, list):
                valid_results.extend(result)
            elif isinstance(result, dict):
                valid_results.append(result)
            elif isinstance(result, Exception):
                logger.error(f"Error while processing a topic: {result}")

        return valid_results

    def escape_for_csv(self, text: str) -> str:
        """Escape backslashes and control characters so each CSV field stays on one line."""
        if not text:
            return text

        # Escape backslashes first so the escapes below stay unambiguous
        text = text.replace('\\', '\\\\')
        # Normalize line endings, then encode newlines and tabs as literal \n and \t
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        text = text.replace('\n', '\\n')
        text = text.replace('\t', '\\t')

        return text

    def save_to_csv(self, data: List[Dict]):
        """Append rows to the CSV file."""
        if not data:
            return

        with open(self.csv_file, 'a', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=['source', 'in_source_id', 'prompt', 'gold_standard_solution'],
                                    quoting=csv.QUOTE_ALL)
            for row in data:
                # Escape string values so multi-line texts stay on one CSV line
                escaped_row = {}
                for key, value in row.items():
                    if isinstance(value, str):
                        escaped_row[key] = self.escape_for_csv(value)
                    else:
                        escaped_row[key] = value
                writer.writerow(escaped_row)

        logger.info(f"Saved {len(data)} rows to {self.csv_file}")

    async def parse_all_pages(self, start_page: int = 1, end_page: int = 2100, batch_size: int = 10):
        """Walk all topic-list pages of the forum and save the results."""
        connector = aiohttp.TCPConnector(limit=20, limit_per_host=10)
        timeout = aiohttp.ClientTimeout(total=60)

        async with aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
        ) as session:

            for page_num in range(start_page, end_page + 1):
                try:
                    logger.info(f"Processing page {page_num} of {end_page}")

                    # Collect Dev-tagged topic URLs from the list page
                    topic_urls = await self.parse_topic_list_page(session, page_num)

                    if not topic_urls:
                        logger.info(f"No topics to process on page {page_num}")
                        continue

                    # Pre-filter already-seen topics; parse_topic re-checks by meta identifier
                    new_topic_urls = []
                    for url in topic_urls:
                        topic_id = self.extract_topic_id(url)
                        if topic_id and topic_id not in self.processed_urls:
                            new_topic_urls.append(url)

                    logger.info(f"New topics to process: {len(new_topic_urls)}")

                    if not new_topic_urls:
                        continue

                    # Process topics in small concurrent batches
                    for i in range(0, len(new_topic_urls), batch_size):
                        batch = new_topic_urls[i:i + batch_size]
                        logger.info(f"Processing batch {i // batch_size + 1} ({len(batch)} topics)")

                        batch_results = await self.process_topics_batch(session, batch)

                        # Persist results after every batch so progress survives interruptions
                        if batch_results:
                            self.save_to_csv(batch_results)

                        # Short pause between batches
                        await asyncio.sleep(2)

                    logger.info(f"Page {page_num} done")

                except Exception as e:
                    logger.error(f"Error while processing page {page_num}: {e}")
                    continue


async def main():
    """Entry point: parse the whole forum into forum_dataset.csv."""
    parser = InfostartForumParser(csv_file='forum_dataset.csv', delay=1.0)

    try:
        await parser.parse_all_pages(start_page=1, end_page=2100, batch_size=5)
        logger.info("Parsing finished")
    except KeyboardInterrupt:
        logger.info("Parsing interrupted by user")
    except Exception as e:
        logger.error(f"Fatal error: {e}")


if __name__ == "__main__":
    asyncio.run(main())