We present SimpleSeg, a strikingly simple yet highly effective approach to endowing Multimodal Large Language Models (MLLMs) with native pixel-level perception. Our method reframes segmentation as a sequence generation problem: the model directly predicts a sequence of points (textual coordinates) delineating object boundaries, entirely within its language space. To achieve high fidelity, we introduce a two-stage SFT→RL training pipeline, in which reinforcement learning with an IoU-based reward refines the point sequences to accurately match ground-truth contours. We find that the standard MLLM architecture possesses a strong, inherent capacity for low-level perception that can be unlocked without any specialized components. On segmentation benchmarks, SimpleSeg achieves performance comparable to, and often surpassing, that of methods that rely on complex, task-specific designs. This work demonstrates that precise spatial understanding can emerge from simple point prediction, challenging the presumed need for auxiliary components and paving the way for more unified and capable VLMs.
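Below is a minimal sketch of how an IoU-based reward of the kind described above could be computed, assuming the model emits an object contour as a flat text string of "x,y" pairs. The coordinate format, function names, and rasterization choices are illustrative assumptions, not the paper's actual implementation.

```python
import numpy as np
from PIL import Image, ImageDraw


def parse_points(text: str) -> list[tuple[float, float]]:
    """Parse a generated string like "12,34 56,78 ..." into (x, y) tuples."""
    points = []
    for token in text.split():
        x_str, y_str = token.split(",")
        points.append((float(x_str), float(y_str)))
    return points


def polygon_to_mask(points, height: int, width: int) -> np.ndarray:
    """Rasterize a closed polygon of (x, y) points into a binary mask."""
    canvas = Image.new("L", (width, height), 0)
    ImageDraw.Draw(canvas).polygon(points, outline=1, fill=1)
    return np.array(canvas, dtype=bool)


def iou_reward(pred_text: str, gt_mask: np.ndarray) -> float:
    """Reward = IoU between the predicted contour's mask and the ground-truth mask."""
    points = parse_points(pred_text)
    if len(points) < 3:  # a degenerate contour cannot enclose any area
        return 0.0
    pred_mask = polygon_to_mask(points, *gt_mask.shape)
    intersection = np.logical_and(pred_mask, gt_mask).sum()
    union = np.logical_or(pred_mask, gt_mask).sum()
    return float(intersection / union) if union > 0 else 0.0
```

In an RL fine-tuning loop, this scalar would serve as the per-sample reward for the generated point sequence; malformed or degenerate outputs simply receive zero reward rather than requiring any specialized decoding head.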
@article{simpleseg,
title={Towards Pixel-Level VLM Perception via Simple Points Prediction},
author={Anonymous},
journal={arXiv preprint arXiv:2025.xxxxx},
year={2025}
}