XCiT: Cross-Covariance Image Transformers
An XCiT image classification model. This model was trained on the il-common dataset, which contains common bird species found in Israel.
The species list is derived from data available at https://www.israbirding.com/checklist/.
Model Type: Image classification and detection backbone
Model Stats: input image size 256 x 256
Dataset: il-common (371 classes)
Papers: XCiT: Cross-Covariance Image Transformers (https://arxiv.org/abs/2106.09681)

Image classification:
import birder
from birder.inference.classification import infer_image
(net, model_info) = birder.load_pretrained_model("xcit_nano12_p16_il-common", inference=True)
# Get the image size the model was trained on
size = birder.get_size_from_signature(model_info.signature)
# Create an inference transform
transform = birder.classification_transform(size, model_info.rgb_stats)
image = "path/to/image.jpeg" # or a PIL image, must be loaded in RGB format
(out, _) = infer_image(net, image, transform)
# out is a NumPy array with shape of (1, 371), representing class probabilities.
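The class probabilities can be post-processed with plain NumPy. As a minimal sketch, assuming only the out array from the snippet above (mapping indices to species names is omitted):

import numpy as np

# out has shape (1, 371); take the five most probable class indices
top5 = np.argsort(out[0])[::-1][:5]
for idx in top5:
    print(idx, out[0][idx])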
Image embeddings:

import birder
from birder.inference.classification import infer_image
(net, model_info) = birder.load_pretrained_model("xcit_nano12_p16_il-common", inference=True)
# Get the image size the model was trained on
size = birder.get_size_from_signature(model_info.signature)
# Create an inference transform
transform = birder.classification_transform(size, model_info.rgb_stats)
image = "path/to/image.jpeg" # or a PIL image
(out, embedding) = infer_image(net, image, transform, return_embedding=True)
# embedding is a NumPy array with shape of (1, 128)
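Embeddings can be compared directly, for example with cosine similarity. A minimal sketch, assuming embedding_a and embedding_b (hypothetical names) were produced by two calls like the one above:

import numpy as np

# Each embedding has shape (1, 128); L2-normalize, then take the dot product
a = embedding_a[0] / np.linalg.norm(embedding_a[0])
b = embedding_b[0] / np.linalg.norm(embedding_b[0])
print(float(np.dot(a, b)))  # 1.0 means identical direction, 0.0 orthogonal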
Detection feature map:

from PIL import Image
import birder
(net, model_info) = birder.load_pretrained_model("xcit_nano12_p16_il-common", inference=True)
# Get the image size the model was trained on
size = birder.get_size_from_signature(model_info.signature)
# Create an inference transform
transform = birder.classification_transform(size, model_info.rgb_stats)
image = Image.open("path/to/image.jpeg")
features = net.detection_features(transform(image).unsqueeze(0))
# features is a dict (stage name -> torch.Tensor)
print([(k, v.size()) for k, v in features.items()])
# Output example:
# [('stage1', torch.Size([1, 128, 16, 16])),
# ('stage2', torch.Size([1, 128, 16, 16])),
# ('stage3', torch.Size([1, 128, 16, 16])),
# ('stage4', torch.Size([1, 128, 16, 16]))]
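Each stage output is an ordinary torch.Tensor, so it can be reduced as needed before being fed to a downstream head. A minimal sketch that global-average-pools every stage into a per-stage vector, using only the features dict from the snippet above:

# Average over the spatial dimensions (H, W) of each stage
pooled = {k: v.mean(dim=(2, 3)) for k, v in features.items()}
print({k: tuple(v.size()) for k, v in pooled.items()})
# e.g. {'stage1': (1, 128), 'stage2': (1, 128), 'stage3': (1, 128), 'stage4': (1, 128)}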
Citation:

@misc{elnouby2021xcitcrosscovarianceimagetransformers,
title={XCiT: Cross-Covariance Image Transformers},
author={Alaaeldin El-Nouby and Hugo Touvron and Mathilde Caron and Piotr Bojanowski and Matthijs Douze and Armand Joulin and Ivan Laptev and Natalia Neverova and Gabriel Synnaeve and Jakob Verbeek and Hervé Jegou},
year={2021},
eprint={2106.09681},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2106.09681},
}