@article{763,
  author    = {Samah Baraheem and Tam Nguyen},
  title     = {Sketch-to-image synthesis via semantic masks},
  journal   = {Multimedia Tools and Applications},
  volume    = {83},
  pages     = {29047},
  numpages  = {20},
  year      = {2024},
  month     = mar,
  publisher = {Springer},
  issn      = {1573-7721},
  doi       = {10.1007/s11042-023-16704-z},
  url       = {https://par.nsf.gov/biblio/10521807},
  keywords  = {Sketch-to-image generation, Sketch-to-image synthesis, Computer vision, Generative adversarial networks, Instance and semantic segmentation, Machine learning},
  abstract  = {Sketch-to-image synthesis is an important task that reduces the burden of creating a color image from scratch. Unlike previous sketch-to-image models, which synthesize the image in an end-to-end manner and often yield unnatural results, our method decomposes the problem into subproblems to generate a more natural and plausible image. It first generates an intermediate semantic mask map from the input sketch through instance and semantic segmentation at two levels: background segmentation and foreground segmentation. The background segmentation is formed based on the context of the foreground objects, and the foreground segmentations are then sequentially added to the created background segmentation. Finally, the generated mask map is fed into an image-to-image translation model to synthesize the output image. Our method works with 92 distinct classes and, compared to state-of-the-art sketch-to-image models, outperforms previous methods and generates better images.},
}