From 83d84e90ed5defb3481e4f4612cceefedb0094df Mon Sep 17 00:00:00 2001 From: chanx <1243304602@qq.com> Date: Thu, 13 Nov 2025 09:50:10 +0800 Subject: [PATCH] Fix: Profile picture cropping supported #10703 (#11221) ### What problem does this PR solve? Fix: Profile picture cropping supported ### Type of change - [x] Bug Fix (non-breaking change which fixes an issue) --- web/src/components/avatar-upload.tsx | 287 ++++++++++++++++++++++++++- web/src/locales/en.ts | 3 + web/src/locales/zh.ts | 2 + web/src/utils/file-util.ts | 7 +- 4 files changed, 292 insertions(+), 7 deletions(-) diff --git a/web/src/components/avatar-upload.tsx b/web/src/components/avatar-upload.tsx index 7a85e08de..9f4a37076 100644 --- a/web/src/components/avatar-upload.tsx +++ b/web/src/components/avatar-upload.tsx @@ -5,12 +5,14 @@ import { forwardRef, useCallback, useEffect, + useRef, useState, } from 'react'; import { useTranslation } from 'react-i18next'; import { Avatar, AvatarFallback, AvatarImage } from './ui/avatar'; import { Button } from './ui/button'; import { Input } from './ui/input'; +import { Modal } from './ui/modal/modal'; type AvatarUploadProps = { value?: string; @@ -22,14 +24,24 @@ export const AvatarUpload = forwardRef( function AvatarUpload({ value, onChange, tips }, ref) { const { t } = useTranslation(); const [avatarBase64Str, setAvatarBase64Str] = useState(''); // Avatar Image base64 + const [isCropModalOpen, setIsCropModalOpen] = useState(false); + const [imageToCrop, setImageToCrop] = useState(null); + const [cropArea, setCropArea] = useState({ x: 0, y: 0, size: 200 }); + const imageRef = useRef(null); + const canvasRef = useRef(null); + const containerRef = useRef(null); + const isDraggingRef = useRef(false); + const dragStartRef = useRef({ x: 0, y: 0 }); + const [imageScale, setImageScale] = useState(1); + const [imageOffset, setImageOffset] = useState({ x: 0, y: 0 }); const handleChange: ChangeEventHandler = useCallback( async (ev) => { const file = 
ev.target?.files?.[0]; if (/\.(jpg|jpeg|png|webp|bmp)$/i.test(file?.name ?? '')) { - const str = await transformFile2Base64(file!); - setAvatarBase64Str(str); - onChange?.(str); + const str = await transformFile2Base64(file!, 1000); + setImageToCrop(str); + setIsCropModalOpen(true); } ev.target.value = ''; }, @@ -41,17 +53,209 @@ export const AvatarUpload = forwardRef( onChange?.(''); }, [onChange]); + const handleCrop = useCallback(() => { + if (!imageRef.current || !canvasRef.current) return; + + const canvas = canvasRef.current; + const ctx = canvas.getContext('2d'); + const image = imageRef.current; + + if (!ctx) return; + + // Set canvas size to 64x64 (avatar size) + canvas.width = 64; + canvas.height = 64; + + // Draw cropped image on canvas + ctx.drawImage( + image, + cropArea.x, + cropArea.y, + cropArea.size, + cropArea.size, + 0, + 0, + 64, + 64, + ); + + // Convert to base64 + const croppedImageBase64 = canvas.toDataURL('image/png'); + setAvatarBase64Str(croppedImageBase64); + onChange?.(croppedImageBase64); + setIsCropModalOpen(false); + }, [cropArea, onChange]); + + const handleCancelCrop = useCallback(() => { + setIsCropModalOpen(false); + setImageToCrop(null); + }, []); + + const initCropArea = useCallback(() => { + if (!imageRef.current || !containerRef.current) return; + + const image = imageRef.current; + const container = containerRef.current; + + // Calculate image scale to fit container + const scale = Math.min( + container.clientWidth / image.width, + container.clientHeight / image.height, + ); + setImageScale(scale); + + // Calculate image offset to center it + const scaledWidth = image.width * scale; + const scaledHeight = image.height * scale; + const offsetX = (container.clientWidth - scaledWidth) / 2; + const offsetY = (container.clientHeight - scaledHeight) / 2; + setImageOffset({ x: offsetX, y: offsetY }); + + // Initialize crop area to center of image + const size = Math.min(scaledWidth, scaledHeight) * 0.8; // 80% of the smaller 
dimension + const x = (image.width - size / scale) / 2; + const y = (image.height - size / scale) / 2; + + setCropArea({ x, y, size: size / scale }); + }, []); + + const handleMouseMove = useCallback( + (e: MouseEvent) => { + if ( + !isDraggingRef.current || + !imageRef.current || + !containerRef.current + ) + return; + + const image = imageRef.current; + const container = containerRef.current; + const containerRect = container.getBoundingClientRect(); + + // Calculate mouse position relative to container + const mouseX = e.clientX - containerRect.left; + const mouseY = e.clientY - containerRect.top; + + // Calculate mouse position relative to image + const imageX = (mouseX - imageOffset.x) / imageScale; + const imageY = (mouseY - imageOffset.y) / imageScale; + + // Calculate new crop area position based on mouse movement + let newX = imageX - dragStartRef.current.x; + let newY = imageY - dragStartRef.current.y; + + // Boundary checks + newX = Math.max(0, Math.min(newX, image.width - cropArea.size)); + newY = Math.max(0, Math.min(newY, image.height - cropArea.size)); + + setCropArea((prev) => ({ + ...prev, + x: newX, + y: newY, + })); + }, + [cropArea.size, imageScale, imageOffset], + ); + + const handleMouseUp = useCallback(() => { + isDraggingRef.current = false; + document.removeEventListener('mousemove', handleMouseMove); + document.removeEventListener('mouseup', handleMouseUp); + }, [handleMouseMove]); + + const handleMouseDown = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + isDraggingRef.current = true; + if (imageRef.current && containerRef.current) { + const container = containerRef.current; + const containerRect = container.getBoundingClientRect(); + + // Calculate mouse position relative to container + const mouseX = e.clientX - containerRect.left; + const mouseY = e.clientY - containerRect.top; + + // Calculate mouse position relative to image + const imageX = (mouseX - imageOffset.x) / imageScale; + const 
imageY = (mouseY - imageOffset.y) / imageScale; + + // Store the offset between mouse position and crop area position + dragStartRef.current = { + x: imageX - cropArea.x, + y: imageY - cropArea.y, + }; + } + document.addEventListener('mousemove', handleMouseMove); + document.addEventListener('mouseup', handleMouseUp); + }, + [cropArea, imageScale, imageOffset], + ); + + const handleWheel = useCallback((e: React.WheelEvent) => { + if (!imageRef.current) return; + + e.preventDefault(); + const image = imageRef.current; + const delta = e.deltaY > 0 ? 0.9 : 1.1; // Zoom factor + + setCropArea((prev) => { + const newSize = Math.max( + 20, + Math.min(prev.size * delta, Math.min(image.width, image.height)), + ); + + // Adjust position to keep crop area centered + const centerRatioX = (prev.x + prev.size / 2) / image.width; + const centerRatioY = (prev.y + prev.size / 2) / image.height; + + const newX = centerRatioX * image.width - newSize / 2; + const newY = centerRatioY * image.height - newSize / 2; + + // Boundary checks + const boundedX = Math.max(0, Math.min(newX, image.width - newSize)); + const boundedY = Math.max(0, Math.min(newY, image.height - newSize)); + + return { + x: boundedX, + y: boundedY, + size: newSize, + }; + }); + }, []); + useEffect(() => { if (value) { setAvatarBase64Str(value); } }, [value]); + useEffect(() => { + const container = containerRef.current; + setTimeout(() => { + console.log('container', container); + // initCropArea(); + if (imageToCrop && container && isCropModalOpen) { + container.addEventListener( + 'wheel', + handleWheel as unknown as EventListener, + { passive: false }, + ); + return () => { + container.removeEventListener( + 'wheel', + handleWheel as unknown as EventListener, + ); + }; + } + }, 100); + }, [handleWheel, containerRef.current]); + return (
{!avatarBase64Str ? ( -
+

{t('common.upload')}

@@ -60,7 +264,7 @@ export const AvatarUpload = forwardRef( ) : (
- +
@@ -93,6 +297,79 @@ export const AvatarUpload = forwardRef(
{tips ?? t('knowledgeConfiguration.photoTip')}
+ + {/* Crop Modal */} + { + setIsCropModalOpen(open); + if (!open) { + setImageToCrop(null); + } + }} + title={t('setting.cropImage')} + size="small" + onCancel={handleCancelCrop} + onOk={handleCrop} + // footer={ + //
+ // + // + //
+ // } + > +
+ {imageToCrop && ( +
+
+ To crop + {imageRef.current && ( +
+ )} +
+
+

+ {t('setting.cropTip')} +

+
+ +
+ )} +
+
); }, diff --git a/web/src/locales/en.ts b/web/src/locales/en.ts index e176b7e3d..915508692 100644 --- a/web/src/locales/en.ts +++ b/web/src/locales/en.ts @@ -694,6 +694,9 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s tocEnhanceTip: ` During the parsing of the document, table of contents information was generated (see the 'Enable Table of Contents Extraction' option in the General method). This allows the large model to return table of contents items relevant to the user's query, thereby using these items to retrieve related chunks and apply weighting to these chunks during the sorting process. This approach is derived from mimicking the behavioral logic of how humans search for knowledge in books.`, }, setting: { + cropTip: + 'Drag the selection area to choose the cropping position of the image, and scroll to zoom in/out', + cropImage: 'Crop image', selectModelPlaceholder: 'Select model', configureModelTitle: 'Configure model', confluenceIsCloudTip: diff --git a/web/src/locales/zh.ts b/web/src/locales/zh.ts index 4e6f7e0d9..a5f4a9d52 100644 --- a/web/src/locales/zh.ts +++ b/web/src/locales/zh.ts @@ -684,6 +684,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于 tocEnhanceTip: `解析文档时生成了目录信息(见General方法的‘启用目录抽取’),让大模型返回和用户问题相关的目录项,从而利用目录项拿到相关chunk,对这些chunk在排序中进行加权。这种方法来源于模仿人类查询书本中知识的行为逻辑`, }, setting: { + cropTip: '拖动选区可以选择图片的裁剪位置,滚动可以放大/缩小选区', + cropImage: '剪裁图片', selectModelPlaceholder: '请选择模型', configureModelTitle: '配置模型', confluenceIsCloudTip: diff --git a/web/src/utils/file-util.ts b/web/src/utils/file-util.ts index 6d8ef9a43..a9d2968c0 100644 --- a/web/src/utils/file-util.ts +++ b/web/src/utils/file-util.ts @@ -2,7 +2,10 @@ import { FileMimeType } from '@/constants/common'; import fileManagerService from '@/services/file-manager-service'; import { UploadFile } from 'antd'; -export const transformFile2Base64 = (val: any): Promise => { +export const transformFile2Base64 = ( + val: any, + imgSize?: number, +): Promise => 
{ return new Promise((resolve, reject) => { const reader = new FileReader(); reader.readAsDataURL(val); @@ -19,7 +22,7 @@ export const transformFile2Base64 = (val: any): Promise => { // Calculate compressed dimensions, set max width/height to 800px let width = img.width; let height = img.height; - const maxSize = 100; + const maxSize = imgSize ?? 100; if (width > height && width > maxSize) { height = (height * maxSize) / width;