from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import Any, Dict, List, Tuple

import numpy as np
import torch
import torch.nn.functional as F

from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, NUM_THREADS, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import SegmentMetrics, mask_iou


class SegmentationValidator(DetectionValidator):
    """
    A class extending the DetectionValidator class for validation based on a segmentation model.

    This validator handles the evaluation of segmentation models, processing both bounding box and mask predictions
    to compute metrics such as mAP for both detection and segmentation tasks.

    Attributes:
        plot_masks (list): List to store masks for plotting.
        process (callable): Function to process masks based on save_json and save_txt flags.
        args (namespace): Arguments for the validator.
        metrics (SegmentMetrics): Metrics calculator for segmentation tasks.
        stats (dict): Dictionary to store statistics during validation.

    Examples:
        >>> from ultralytics.models.yolo.segment import SegmentationValidator
        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
        >>> validator = SegmentationValidator(args=args)
        >>> validator()
    """

    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
        """
        Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.

        Args:
            dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
            save_dir (Path, optional): Directory to save results.
            args (namespace, optional): Arguments for the validator.
            _callbacks (list, optional): List of callback functions.
        """
        super().__init__(dataloader, save_dir, args, _callbacks)
        self.process = None
        self.args.task = "segment"
        self.metrics = SegmentMetrics()

    def preprocess(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """
        Preprocess batch of images for YOLO segmentation validation.

        Args:
            batch (Dict[str, Any]): Batch containing images and annotations.

        Returns:
            (Dict[str, Any]): Preprocessed batch.
        """
        batch = super().preprocess(batch)
        batch["masks"] = batch["masks"].to(self.device).float()
        return batch

    def init_metrics(self, model: torch.nn.Module) -> None:
        """
        Initialize metrics and select mask processing function based on save_json flag.

        Args:
            model (torch.nn.Module): Model to validate.
        """
        super().init_metrics(model)
        if self.args.save_json:
            check_requirements("faster-coco-eval>=1.6.7")
        # Full-resolution (native) mask processing is slower but more accurate; use it when results are saved.
        self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask

    def get_desc(self) -> str:
        """Return a formatted description of evaluation metrics."""
        return ("%22s" + "%11s" * 10) % (
            "Class",
            "Images",
            "Instances",
            "Box(P",
            "R",
            "mAP50",
            "mAP50-95)",
            "Mask(P",
            "R",
            "mAP50",
            "mAP50-95)",
        )

    def postprocess(self, preds: List[torch.Tensor]) -> List[Dict[str, torch.Tensor]]:
        """
        Post-process YOLO predictions and return output detections with proto.

        Args:
            preds (List[torch.Tensor]): Raw predictions from the model.

        Returns:
            List[Dict[str, torch.Tensor]]: Processed detection predictions with masks.
        """
        # The second model output holds the prototype masks: a 3-tuple for PyTorch models, a single tensor when exported.
        proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]
        preds = super().postprocess(preds[0])
        imgsz = [4 * x for x in proto.shape[2:]]  # model input size inferred from the prototype resolution
        for i, pred in enumerate(preds):
            coefficient = pred.pop("extra")  # per-box mask coefficients
            pred["masks"] = (
                self.process(proto[i], coefficient, pred["bboxes"], shape=imgsz)
                if len(coefficient)
                else torch.zeros(
                    (0, *(imgsz if self.process is ops.process_mask_native else proto.shape[2:])),
                    dtype=torch.uint8,
                    device=pred["bboxes"].device,
                )
            )
        return preds

    def _prepare_batch(self, si: int, batch: Dict[str, Any]) -> Dict[str, Any]:
        """
        Prepare a batch for training or inference by processing images and targets.

        Args:
            si (int): Batch index.
            batch (Dict[str, Any]): Batch data containing images and annotations.

        Returns:
            (Dict[str, Any]): Prepared batch with processed annotations.
        """
        prepared_batch = super()._prepare_batch(si, batch)
        midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
        prepared_batch["masks"] = batch["masks"][midx]
        return prepared_batch

    def _process_batch(self, preds: Dict[str, torch.Tensor], batch: Dict[str, Any]) -> Dict[str, np.ndarray]:
        """
        Compute correct prediction matrix for a batch based on bounding boxes and optional masks.

        Args:
            preds (Dict[str, torch.Tensor]): Dictionary containing predictions with keys like 'cls' and 'masks'.
            batch (Dict[str, Any]): Dictionary containing batch data with keys like 'cls' and 'masks'.

        Returns:
            (Dict[str, np.ndarray]): A dictionary containing correct prediction matrices including 'tp_m' for mask IoU.

        Notes:
            - If `masks` is True, the function computes IoU between predicted and ground truth masks.
            - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.

        Examples:
            >>> preds = {"cls": torch.tensor([1, 0]), "masks": torch.rand(2, 640, 640), "bboxes": torch.rand(2, 4)}
            >>> batch = {"cls": torch.tensor([1, 0]), "masks": torch.rand(2, 640, 640), "bboxes": torch.rand(2, 4)}
            >>> correct_preds = validator._process_batch(preds, batch)
        """
        tp = super()._process_batch(preds, batch)
        gt_cls, gt_masks = batch["cls"], batch["masks"]
        if len(gt_cls) == 0 or len(preds["cls"]) == 0:
            tp_m = np.zeros((len(preds["cls"]), self.niou), dtype=bool)
        else:
            pred_masks = preds["masks"]
            if self.args.overlap_mask:
                # Ground-truth masks are stored as a single index map; expand to one binary mask per instance.
                nl = len(gt_cls)
                index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
                gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1, H, W) -> (n, H, W)
                gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
            if gt_masks.shape[1:] != pred_masks.shape[1:]:
                # Resize ground-truth masks to the predicted mask resolution before computing IoU.
                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
                gt_masks = gt_masks.gt_(0.5)
            iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
            tp_m = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
        tp.update({"tp_m": tp_m})  # add mask true positives alongside the box results
        return tp

    def plot_predictions(self, batch: Dict[str, Any], preds: List[Dict[str, torch.Tensor]], ni: int) -> None:
        """
        Plot batch predictions with masks and bounding boxes.

        Args:
            batch (Dict[str, Any]): Batch containing images and annotations.
            preds (List[Dict[str, torch.Tensor]]): List of predictions from the model.
            ni (int): Batch index.
        """
        for p in preds:
            masks = p["masks"]
            if masks.shape[0] > 50:
                LOGGER.warning("Limiting validation plots to first 50 items per image for speed...")
            p["masks"] = torch.as_tensor(masks[:50], dtype=torch.uint8).cpu()
        super().plot_predictions(batch, preds, ni, max_det=50)  # plot bboxes

    def save_one_txt(self, predn: torch.Tensor, save_conf: bool, shape: Tuple[int, int], file: Path) -> None:
        """
        Save YOLO detections to a txt file in normalized coordinates in a specific format.

        Args:
            predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
            save_conf (bool): Whether to save confidence scores.
            shape (Tuple[int, int]): Shape of the original image.
            file (Path): File path to save the detections.
        """
        from ultralytics.engine.results import Results

        Results(
            np.zeros((shape[0], shape[1]), dtype=np.uint8),
            path=None,
            names=self.names,
            boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
            masks=torch.as_tensor(predn["masks"], dtype=torch.uint8),
        ).save_txt(file, save_conf=save_conf)

    def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
        """
        Save one JSON result for COCO evaluation.

        Args:
            predn (Dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
            pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.

        Examples:
             >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
        """
        from faster_coco_eval.core.mask import encode  # noqa

        def single_encode(x):
            """Encode predicted masks as RLE and append results to jdict."""
            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
            rle["counts"] = rle["counts"].decode("utf-8")
            return rle

        # Rescale masks to the original image shape before RLE encoding.
        coco_masks = torch.as_tensor(predn["masks"], dtype=torch.uint8)
        coco_masks = ops.scale_image(
            coco_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
            pbatch["ori_shape"],
            ratio_pad=pbatch["ratio_pad"],
        )
        with ThreadPool(NUM_THREADS) as pool:
            rles = pool.map(single_encode, np.transpose(coco_masks, (2, 0, 1)))
        super().pred_to_json(predn, pbatch)
        for i, r in enumerate(rles):
            self.jdict[-len(rles) + i]["segmentation"] = r  # attach RLE masks to the entries just appended

    def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
        """Return COCO-style instance segmentation evaluation metrics."""
        pred_json = self.save_dir / "predictions.json"  # predictions
        anno_json = (
            self.data["path"]
            / "annotations"
            / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
        )  # annotations
        return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
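

# --- Usage sketch (illustrative, not part of the upstream module) -----------
# A minimal example of running this validator standalone, mirroring the class
# docstring above. It assumes the "yolo11n-seg.pt" weights and the
# "coco8-seg.yaml" dataset definition are available (Ultralytics normally
# fetches both on demand); adjust model and data paths for your own setup.
if __name__ == "__main__":
    args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
    validator = SegmentationValidator(args=args)
    validator()  # runs validation and reports box and mask mAP metrics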