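"""Asynchronous scraper for 1C code templates from https://fastcode.im.

Walks the paginated template listing, parses each template page (title,
description, source code, tags and comments) and appends the results to a
CSV file. Template IDs already present in the CSV are skipped, so an
interrupted run can be resumed.
"""
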
import asyncio
import copy
import csv
import logging
import os
import re
import time
from typing import Dict, List, Optional, Set
from urllib.parse import urljoin, urlparse

import aiohttp
from bs4 import BeautifulSoup

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class FastCodeTemplatesParser:

    def __init__(self, csv_file: str = 'fastcode_templates.csv', delay: float = 1.0):
        self.csv_file = csv_file
        self.delay = delay
        self.base_url = 'https://fastcode.im'
        self.processed_ids: Set[str] = set()

        self._init_csv()
        self._load_processed_ids()
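
    # Each CSV row describes one template and has the columns:
    #   source, in_source_id, prompt, solution, tags, is_answer_a_link, has_link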

    def _init_csv(self):
        """Create the CSV file with a header row if it does not exist yet."""
        if not os.path.exists(self.csv_file):
            with open(self.csv_file, 'w', newline='', encoding='utf-8') as file:
                writer = csv.writer(file, quoting=csv.QUOTE_ALL)
                writer.writerow(['source', 'in_source_id', 'prompt', 'solution', 'tags', 'is_answer_a_link', 'has_link'])

    def _load_processed_ids(self):
        """Load the IDs of already processed templates from the CSV file."""
        if os.path.exists(self.csv_file):
            with open(self.csv_file, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    if row['in_source_id']:
                        self.processed_ids.add(row['in_source_id'])
            logger.info(f"Загружено {len(self.processed_ids)} уже обработанных шаблонов")

    async def fetch_page(self, session: aiohttp.ClientSession, url: str) -> Optional[str]:
        """Fetch a page and return its HTML, or None on error."""
        try:
            # Simple rate limiting: pause before every request.
            await asyncio.sleep(self.delay)
            async with session.get(url, timeout=30) as response:
                if response.status == 200:
                    return await response.text()
                else:
                    logger.warning(f"Ошибка {response.status} при загрузке {url}")
                    return None
        except Exception as e:
            logger.error(f"Ошибка при загрузке {url}: {e}")
            return None

    async def parse_templates_list_page(self, session: aiohttp.ClientSession, page_num: int) -> List[str]:
        """Parse one listing page and return the template IDs found on it."""
        url = f"{self.base_url}/Templates?Page={page_num}&TemplatesOnly=True"
        logger.info(f"Парсинг страницы списка шаблонов: {page_num}")

        html = await self.fetch_page(session, url)
        if not html:
            return []

        soup = BeautifulSoup(html, 'html.parser')
        template_ids = []

        index_partial = soup.find('div', id='indexPartial')
        if not index_partial:
            logger.warning(f"Не найден div#indexPartial на странице {page_num}")
            return []

        title_headers = index_partial.find_all('h3', class_='post_title break-word')
        logger.info(f"Найдено {len(title_headers)} заголовков на странице {page_num}")

        for header in title_headers:
            link = header.find('a', href=True)
            if link:
                href = link['href']
                if href.startswith('/Templates/'):
                    # Template links look like /Templates/<numeric id>.
                    match = re.search(r'/Templates/(\d+)', href)
                    if match:
                        template_id = match.group(1)
                        template_ids.append(template_id)

        logger.info(f"Найдено {len(template_ids)} валидных шаблонов на странице {page_num}")
        return template_ids

    def extract_title(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the template title."""
        articles = soup.find_all('div', class_='article')

        # Prefer an <h1> inside an article block.
        for article in articles:
            h1 = article.find('h1')
            if h1:
                return h1.get_text().strip()

        # Fall back to the first <h1> anywhere on the page.
        h1 = soup.find('h1')
        if h1:
            return h1.get_text().strip()

        return None

    def extract_tags(self, soup: BeautifulSoup) -> List[str]:
        """Extract the template tags."""
        tags = []

        tag_labels = soup.find_all('span', class_='tag-label')

        for tag_label in tag_labels:
            label_spans = tag_label.find_all('span', class_='label')
            for span in label_spans:
                tag_text = span.get_text().strip()
                if tag_text:
                    # Some labels contain several tags separated by '#'.
                    if '#' in tag_text:
                        individual_tags = [t.strip() for t in tag_text.split('#') if t.strip()]
                        tags.extend(individual_tags)
                    else:
                        tags.append(tag_text)

        # Deduplicate while preserving the original order.
        return list(dict.fromkeys(tags))

    def extract_description(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the template description."""
        # Preferred layout: a break-word paragraph with an inner pre-line span.
        desc_p = soup.find('p', class_='break-word', style='margin-bottom: 0px;')
        if desc_p:
            span = desc_p.find('span', style='white-space: pre-line')
            if span:
                return span.get_text().strip()

        # Fallback: any break-word paragraph whose style removes the bottom margin.
        desc_elements = soup.find_all('p', class_='break-word')
        for p in desc_elements:
            style = p.get('style', '')
            if 'margin-bottom: 0px' in style or 'margin-bottom:0px' in style:
                span = p.find('span')
                if span:
                    return span.get_text().strip()
                else:
                    return p.get_text().strip()

        return None

    def extract_code(self, soup: BeautifulSoup) -> Optional[str]:
        """Extract the template source code."""
        code_element = soup.find('code', class_='1c')
        if code_element:
            return self.clean_1c_code(code_element)
        return None

    def clean_1c_code(self, code_element) -> str:
        """Clean up 1C code while preserving indentation."""
        code_text = code_element.get_text()

        # Decode HTML entities that may still be present in the text.
        code_text = code_text.replace('&quot;', '"')
        code_text = code_text.replace('&lt;', '<')
        code_text = code_text.replace('&gt;', '>')
        code_text = code_text.replace('&amp;', '&')

        # Normalize line endings.
        code_text = code_text.replace('\r\n', '\n')
        code_text = code_text.replace('\r', '\n')

        # Strip trailing whitespace from every line but keep the indentation.
        lines = code_text.split('\n')
        cleaned_lines = []
        for line in lines:
            cleaned_line = line.rstrip()
            cleaned_lines.append(cleaned_line)

        # Drop leading and trailing blank lines.
        while cleaned_lines and not cleaned_lines[0].strip():
            cleaned_lines.pop(0)
        while cleaned_lines and not cleaned_lines[-1].strip():
            cleaned_lines.pop()

        return '\n'.join(cleaned_lines)

    def extract_comments(self, soup: BeautifulSoup) -> List[str]:
        """Extract user comments on the template."""
        comments = []

        comments_section = soup.find('div', id='comments_section')
        if not comments_section:
            return comments

        comment_divs = comments_section.find_all('div', id=True)

        for comment_div in comment_divs:
            # Skip the placeholder element that marks the end of the list.
            if comment_div.get('id') == 'last_comment':
                continue

            # Work on a copy so the original soup is left untouched.
            comment_copy = copy.copy(comment_div)

            # Drop the first inner <div> before extracting the text.
            first_div = comment_copy.find('div')
            if first_div:
                first_div.decompose()

            # Drop the trailing <div> with the 'margin-top: 15px' style.
            last_div = comment_copy.find('div', style=lambda x: x and 'margin-top: 15px' in x)
            if last_div:
                last_div.decompose()

            # Remove horizontal separators.
            hr_tags = comment_copy.find_all('hr')
            for hr in hr_tags:
                hr.decompose()

            comment_text = comment_copy.get_text().strip()
            if comment_text:
                comments.append(comment_text)

        return comments

    def count_links_in_text(self, text: str) -> int:
        """Count the number of URLs in the text."""
        url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+'
        links = re.findall(url_pattern, text)
        return len(links)

    def has_links_in_text(self, text: str) -> bool:
        """Check whether the text contains at least one URL."""
        url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+'
        return bool(re.search(url_pattern, text))

    def format_solution(self, description: str, code: str, comments: List[str]) -> str:
        """Assemble the solution field as a markdown document."""
        solution_parts = []

        if description:
            solution_parts.append(description)

        if code:
            solution_parts.append("# Код реализации")
            solution_parts.append(f"```1c\n{code}\n```")

        if comments:
            solution_parts.append("# Примечания")
            for comment in comments:
                solution_parts.append(f"- {comment}")

        return '\n\n'.join(solution_parts)
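
    # The assembled solution field looks roughly like:
    #
    #   <description>
    #
    #   # Код реализации
    #   ```1c
    #   <code>
    #   ```
    #
    #   # Примечания
    #   - <comment>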

    async def parse_template(self, session: aiohttp.ClientSession, template_id: str) -> Optional[Dict]:
        """Parse a single template page and return a CSV-ready record."""
        if template_id in self.processed_ids:
            logger.debug(f"Шаблон {template_id} уже обработан")
            return None

        url = f"{self.base_url}/Templates/{template_id}"
        logger.info(f"Парсинг шаблона {template_id}")

        html = await self.fetch_page(session, url)
        if not html:
            return None

        soup = BeautifulSoup(html, 'html.parser')

        title = self.extract_title(soup)
        if not title:
            logger.warning(f"Не найден заголовок для шаблона {template_id}")
            return None

        description = self.extract_description(soup)
        code = self.extract_code(soup)
        tags = self.extract_tags(soup)
        comments = self.extract_comments(soup)

        solution = self.format_solution(description or "", code or "", comments)

        link_count = self.count_links_in_text(solution)
        has_links = self.has_links_in_text(solution)

        result = {
            'source': 'fastcode_Templates',
            'in_source_id': template_id,
            'prompt': title,
            'solution': solution,
            'tags': ','.join(tags) if tags else '',
            'is_answer_a_link': has_links,
            'has_link': link_count if link_count > 0 else None
        }

        logger.info(f"Обработан шаблон {template_id}: '{title}'")
        self.processed_ids.add(template_id)
        return result

    async def process_templates_batch(self, session: aiohttp.ClientSession, template_ids: List[str]) -> List[Dict]:
        """Process a batch of templates concurrently."""
        tasks = [self.parse_template(session, template_id) for template_id in template_ids]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Keep successful records; log exceptions instead of letting them propagate.
        valid_results = []
        for result in results:
            if isinstance(result, dict):
                valid_results.append(result)
            elif isinstance(result, Exception):
                logger.error(f"Ошибка при обработке шаблона: {result}")

        return valid_results

    def escape_for_csv(self, text: str) -> str:
        """Escape special characters so the value stays on a single CSV line."""
        if not text:
            return text

        text = text.replace('\\', '\\\\')
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        text = text.replace('\n', '\\n')
        text = text.replace('\t', '\\t')

        return text

    def save_to_csv(self, data: List[Dict]):
        """Append the parsed records to the CSV file."""
        if not data:
            return

        with open(self.csv_file, 'a', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(
                file,
                fieldnames=['source', 'in_source_id', 'prompt', 'solution', 'tags', 'is_answer_a_link', 'has_link'],
                quoting=csv.QUOTE_ALL,
            )
            for row in data:
                # Escape string values before writing the row.
                escaped_row = {}
                for key, value in row.items():
                    if isinstance(value, str):
                        escaped_row[key] = self.escape_for_csv(value)
                    else:
                        escaped_row[key] = value
                writer.writerow(escaped_row)

        logger.info(f"Сохранено {len(data)} записей в {self.csv_file}")

    async def parse_all_pages(self, start_page: int = 1, end_page: int = 36, batch_size: int = 10):
        """Walk all listing pages and parse every new template on them."""
        connector = aiohttp.TCPConnector(limit=20, limit_per_host=10)
        timeout = aiohttp.ClientTimeout(total=60)

        async with aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
        ) as session:

            for page_num in range(start_page, end_page + 1):
                try:
                    logger.info(f"Обработка страницы {page_num} из {end_page}")

                    template_ids = await self.parse_templates_list_page(session, page_num)

                    if not template_ids:
                        logger.info(f"Нет шаблонов для обработки на странице {page_num}")
                        continue

                    # Skip templates that are already in the CSV.
                    new_template_ids = [tid for tid in template_ids if tid not in self.processed_ids]

                    logger.info(f"Новых шаблонов для обработки: {len(new_template_ids)}")

                    if not new_template_ids:
                        continue

                    # Process the new templates in small concurrent batches.
                    for i in range(0, len(new_template_ids), batch_size):
                        batch = new_template_ids[i:i + batch_size]
                        logger.info(f"Обработка пакета {i//batch_size + 1}, шаблонов в пакете: {len(batch)}")

                        batch_results = await self.process_templates_batch(session, batch)

                        if batch_results:
                            self.save_to_csv(batch_results)

                        # Extra pause between batches to keep the load on the site low.
                        await asyncio.sleep(2)

                    logger.info(f"Страница {page_num} обработана")

                except Exception as e:
                    logger.error(f"Ошибка при обработке страницы {page_num}: {e}")
                    continue
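

# parse_all_pages() is driven by main() below: listing pages are processed
# sequentially, while the templates on each page are fetched concurrently
# in small batches.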


async def main():
    """Entry point."""
    parser = FastCodeTemplatesParser(csv_file='fastcode_templates.csv', delay=1.0)

    try:
        await parser.parse_all_pages(start_page=1, end_page=36, batch_size=5)
        logger.info("Парсинг завершен")
    except KeyboardInterrupt:
        logger.info("Парсинг прерван пользователем")
    except Exception as e:
        logger.error(f"Критическая ошибка: {e}")


if __name__ == "__main__":
    asyncio.run(main())
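
# A quick way to try the parser on a smaller range (hypothetical snippet for
# illustration; the CSV filename is arbitrary):
#
#     parser = FastCodeTemplatesParser(csv_file='fastcode_sample.csv', delay=1.0)
#     asyncio.run(parser.parse_all_pages(start_page=1, end_page=2, batch_size=3))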