<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1" xmlns:xhtml="http://www.w3.org/1999/xhtml">
  <url>
    <loc>https://xr.cornell.edu/workshop/2019/sponsors-gallery</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-06-02</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1547747318308-QBHQU23NP00DL3KY5SOM/Facebook-06-2015-White-on-Blue.png</image:loc>
      <image:title>CV4ARVR 2019 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1547747318308-QBHQU23NP00DL3KY5SOM/Facebook-06-2015-White-on-Blue.png</image:loc>
      <image:title>CV4ARVR 2019 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Sponsors</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/home</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-09-15</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1564755146786-6ZPTZM3U6T1ORWNKRXER/lines.png</image:loc>
      <image:title>Home - Welcome</image:title>
      <image:caption>The XR @ Cornell website gathers efforts from across Cornell’s campuses that relate to augmented and virtual reality, and their core disciplines of computer vision, computer graphics, and human-computer interaction.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1566508643558-3ZNWNNZZUS5S6BLQTA5Q/bgwhite.jpg</image:loc>
      <image:title>Home</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1565891452779-NBUT48V9HFPZQPT1FIL9/checkerboard.jpg</image:loc>
      <image:title>Home</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/gallery/demos-posters</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-01-06</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560370876095-WNN7R9IVA5LMIDH9RCU0/4_3D_Photos__CV_4_AR_VR_Workshop.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Practical 3D Photography</image:title>
      <image:caption>Johannes Kopf, Suhib Alsisan, Francis Ge, Yangming Chong, Kevin Matzen, Ocean Quigley, Josh Patterson, Jossie Tirado, Shu Wu, Michael F. Cohen CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560370876095-WNN7R9IVA5LMIDH9RCU0/4_3D_Photos__CV_4_AR_VR_Workshop.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Practical 3D Photography</image:title>
      <image:caption>Johannes Kopf, Suhib Alsisan, Francis Ge, Yangming Chong, Kevin Matzen, Ocean Quigley, Josh Patterson, Jossie Tirado, Shu Wu, Michael F. Cohen CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560448590989-DSJQ9H552KXATV778YWT/5_CV4AR_2019_BlazeFace_v2.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - BlazeFace: Sub-millisecond Neural Face Detection on Mobile GPUs</image:title>
      <image:caption>Valentin Bazarevsky, Yury Kartynnik, Andrey Vakunov, Karthik Raveendran, Matthias Grundmann CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1559947639167-BU3FR3ASTBHMRX7RVLPD/23_human_hair.png</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Human Hair Segmentation In The Wild Using Deep Shape Prior</image:title>
      <image:caption>PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560371139760-JJ9I0GEZ7MS483HVGSXK/6_CV4AR_Mesh.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Real-time Facial Surface Geometry from Monocular Video on Mobile GPUs</image:title>
      <image:caption>Yury Kartynnik, Artsiom Ablavatski, Ivan Grishchenko, Matthias Grundmann CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560371514455-PMBWHWD1HR56J01ZKEEI/7_stotko2019cv4arvr.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Towards Scalable Sharing of Immersive Live Telepresence Experiences Beyond Room-scale based on Efficient Real-time 3D Reconstruction and Streaming</image:title>
      <image:caption>Patrick Stotko, Stefan Krumpen, Reinhard Klein, Michael Weinmann CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560371981444-XRHX8BZVVQOKGB531HAD/8_Frajberg_CVPR_workshop_AR_VR.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Computer Vision-enhanced Augmented Reality for mountain outdoor exploration</image:title>
      <image:caption>Darian Frajberg, Piero Fraternali CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560372053587-RLOM88T8XQ9JBULBVV3V/9_CVPR_2019_VR.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Shooting Labels by Virtual Reality</image:title>
      <image:caption>Pierluigi Zama Ramirez, Claudio Paternesi, Daniele De Gregorio, Luigi Di Stefano CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560372981353-FOC8RINGS2JIG68BUB04/10_CV4ARVR2019-jet-camera-ready.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - On-Device Augmented Reality with Mobile GPUs</image:title>
      <image:caption>Juhyun Lee, Nikolay Chirkov, Ekaterina Ignasheva, Yury Pisarchyk, Mogan Shieh, Fabio Riccardi, Raman Sarokin, Andrei Kulik, and Matthias Grundmann CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560373304833-ONEEF4XIACGVACNA7AWV/12_Lipstick_Simulation_Paper_For_Submission.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - High-Quality AR Lipstick Simulation via Image Filtering Techniques</image:title>
      <image:caption>Kanstantsin Sokal, Siarhei Kazakou, Igor Kibalchich, Matsvei Zhdanovich CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560373447752-K43IUMNYM6YKJW3Q8D1I/13_BadiasEtAl.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Towards Simulated Reality</image:title>
      <image:caption>Alberto Badias, Iciar Alfaro, David Gonzalez, Elias Cueto, Francisco Chinesta CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560373619961-H1TPE9QNMLAG5NKRCHKC/14_nails-extended-abstract.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Nail Polish Try-On: Realtime Semantic Segmentation of Small Objects for Native and Browser Smartphone AR Applications</image:title>
      <image:caption>Brendan Duke, Abdalla Ahmed, Edmund Phung, Irina Kezele, Parham Aarabi CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560377518906-SRYXYT53H3WQZ1AAC9LZ/15_InstantMotionTracking_CVPR_paper_camera_ready_revised.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Instant Motion Tracking and Its Applications to Augmented Reality</image:title>
      <image:caption>Jianing Wei, Genzhi Ye, Tyler Mullen, Matthias Grundmann, Adel Ahmadyan, Tingbo Hou CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560377628031-ACABH239YAY6UYRGQV66/16_grasp_capture.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Towards Markerless Grasp Capture</image:title>
      <image:caption>Samarth Brahmbhatt, Charles C. Kemp, James Hays CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560377749949-Z72BL8TSSB9ZKMSWHU9C/17_CVPR19____lighting_AR__extended_abstract.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Fast Spatially-Varying Indoor Lighting Estimation</image:title>
      <image:caption>Mathieu Garon, Kalyan Sunkavalli, Sunil Hadap, Nathan Carr, Jean-Francois Lalonde CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560377853696-AD4L94PI2N7LTYHDHICS/19_cvpr19.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Content Assisted Viewport Prediction for Panoramic Video Streaming</image:title>
      <image:caption>Tan Xu, Feng Qian, Bo Han CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449057337-77CPTPVKU5NIE3PQRHUB/21_CVPR_Tiny_CNN_workshop_camera_ready.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Lightweight Real-time Makeup Try-on in Mobile Browsers with Tiny CNN Models for Facial Tracking</image:title>
      <image:caption>TianXing Li, Zhi Yu, Edmund Phung, Brendan Duke, Irina Kezele, Parham Aarabi CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1578307523375-GJBSOUERZAY814NQ33WC/22_crv2_MediaPipe_CVPR_CV4ARVR_Workshop_2019_v2.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - MediaPipe: A Framework for Perceiving and Processing Reality</image:title>
      <image:caption>Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuo-Ling Chang, Ming Guang Yong, Juhyun Lee, Wan-Teh Chang, Wei Hua, Manfred Georg, Matthias Grundmann CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449334246-H6WDS2655VIDAUGSTF5B/23_CVPRW_hair_segmentation.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Human Hair Segmentation In The Wild Using Deep Shape Prior</image:title>
      <image:caption>Yongzhe Yan, Anthony Berthelier, Stefan Duffner, Xavier Naturel, Christophe Garcia, Thierry Chateau CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449451433-8O6YY4RXP4RFK3OQUBF4/24_CVPR2019_Hair_Segmentation_v2.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Real-time Hair segmentation and recoloring on Mobile GPUs</image:title>
      <image:caption>Andrei Tkachenka, Gregory Karpiak, Andrey Vakunov, Yury Kartynnik, Artsiom Ablavatski, Valentin Bazarevsky, Siargey Pisarchyk CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449707075-UZ91IJ1VKCZGX0WZV69F/26_DeepGestures_CVPR_Workshop_2019.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Efficient 2.5D Hand Pose Estimation via Auxiliary Multi-Task Training for Embedded Devices</image:title>
      <image:caption>Prajwal Chidananda, Ayan Sinha, Adithya Rao, Douglas Lee, Andrew Rabinovich CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449781231-G96E41P263HO54A1GAIZ/27_younes_final.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - The Advantages of a Joint Direct and Indirect VSLAM in AR</image:title>
      <image:caption>Georges Younes, Daniel Asmar, John Zelek CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449859365-OAHVPLWWF7IGAB87O5L1/28_holopose_ARVR_camera_ready.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - HoloPose: Real Time Holistic 3D Human Reconstruction In-The-Wild</image:title>
      <image:caption>Rıza Alp Güler, George Papandreou, Dan Stoddart, Stefanos Zafeiriou, Iasonas Kokkinos CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560449999444-KUTGJQIPJ8VWOY0Y2KKD/30_Generating_Spatial_Attention_Cues_via_Illusory_Motion.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Generating Spatial Attention Cues via Illusory Motion</image:title>
      <image:caption>Janus Nørtoft Jensen, Morten Hannemose, Jakob Wilm, Anders Bjorholm Dahl, Jeppe Revall Frisvad, Serge Belongie CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF | Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560450140012-M77ZV8P427NMBJKHVPHY/32_Annotate_all__A_Perspective_Preserved_Asynchronous__Annotation_System_for_Collaborative_Augmented_Reality.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Annotate All! A Perspective Preserved Asynchronous Annotation System for Collaborative Augmented Reality</image:title>
      <image:caption>Po Yen Tseng, Harald Haraldsson, Serge Belongie CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560450254965-DO1QBELGPT1F6R00YSEF/35_Ran_Sun_CVPR_Submission.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Anon-Emoji: An Optical See-Through Augmented Reality System for Reducing Appearance Bias in Social Interactions</image:title>
      <image:caption>Ran Sun, Harald Haraldsson, Yuhang Zhao, Serge Belongie CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560450357194-3U4B419H4FZI6I2E3HQO/36_CameraReady.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Context Aware Recommendations Embedded in Augmented Viewpoint to Retarget Consumers in V-Commerce</image:title>
      <image:caption>Kumar Ayush CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560450431393-8OYFH5V52NTDSVPZ2YCA/38_Deep_Space_Time_Prior_for_Novel_View_Synthesis.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - Deep Space-Time Prior for Novel View Synthesis</image:title>
      <image:caption>Zain Shah CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560602877201-NBAGOFAFB82BP3ZXXF4P/18_CV4AVR19_MCoAVR_camera.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - A Lightweight Mobile Remote Collaboration Using Mixed Reality</image:title>
      <image:caption>Jeremy Venerella, Lakpa Sherpa, Hao Tang, Zhigang Zhu CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1560603056555-ZTUAIUYY9QPJPSEBV31F/20_CV4ARVR_19_Abstract-2.jpg</image:loc>
      <image:title>CV4ARVR 2019 - Demos and Posters Gallery - 3D Scene Generation From Real-world Images</image:title>
      <image:caption>Flora Tasse, Pavan Kamaraju, Ghislain Fouodji CVPR Workshop on Computer Vision for Augmented and Virtual Reality, Long Beach, CA, 2019. PDF</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/disciplines</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2019-08-21</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1566417951628-AB1J3FLDBCILGWWTLFSI/computer+vision+cv+icon.png</image:loc>
      <image:title>Disciplines - Computer Vision</image:title>
      <image:caption />
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1566417951628-AB1J3FLDBCILGWWTLFSI/computer+vision+cv+icon.png</image:loc>
      <image:title>Disciplines - Computer Vision</image:title>
      <image:caption />
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1566417951575-H8M526AJP3857B4PTEAW/computer+graphics+cg+icon.png</image:loc>
      <image:title>Disciplines - Computer Graphics</image:title>
      <image:caption />
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1566417951703-EBOIBL5GCHRQTQ91GF8A/human+computer+interaction+hci+icon.png</image:loc>
      <image:title>Disciplines - Human-Computer Interaction</image:title>
      <image:caption />
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/gallery-2020-speakers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-06-20</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591924925092-0FP0R0ICS76D9KOTPF13/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - OPENING REMARKS</image:title>
      <image:caption>Fernando De la Torre (Facebook, CMU)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591924925092-0FP0R0ICS76D9KOTPF13/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - OPENING REMARKS</image:title>
      <image:caption>Fernando De la Torre (Facebook, CMU)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591926631140-HVENYNBNMMZBBB4ACPAY/image-asset.jpeg</image:loc>
      <image:title>Gallery: 2020 Program Items - Jamie Shotton (Microsoft)</image:title>
      <image:caption>Talk: Human Understanding for HoloLens (Watch) Abstract: Mixed reality devices such as HoloLens offer new and exciting holographic experiences and capabilities. Underpinning these experiences is a rich understanding of the user, powered by state of the art computer vision algorithms. In this talk we'll explore some of the challenges we faced in bringing those algorithms into HoloLens 2, as well as taking a glimpse at some of the exciting opportunities for computer vision to impact future devices and enable transformative experiences such as remote presence. Bio: Jamie Shotton is Partner Director of Science at Microsoft. He leads the Mixed Reality &amp; AI Labs in Cambridge and Belgrade, where his team incubates transformative new technologies and experiences from early stage research to shipping product. He studied Computer Science at the University of Cambridge, where he remained for his PhD in computer vision and machine learning, before joining Microsoft in 2008. His research focuses at the intersection of computer vision, AI, machine learning, and graphics, with particular emphasis on systems that understand the motion, shape, and appearance of people in 3D. He has explored applications of this work for mixed reality, virtual presence, human-computer interaction, gaming, and healthcare. He has shipped foundational features in multiple products including body tracking for Kinect and the hand- and eye-tracking that enable HoloLens 2’s instinctual interaction model. He has received multiple Best Paper and Best Demo awards at top-tier academic conferences. His work on Kinect was awarded the Royal Academy of Engineering’s gold medal MacRobert Award in 2011, and he shares Microsoft’s Outstanding Technical Achievement Award for 2012 with the Kinect engineering team. In 2014 he received the PAMI Young Researcher Award, and in 2015 the MIT Technology Review Innovator Under 35 Award.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591926736758-CDIDIP1PHEIMNIIT07IW/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - Shiri Azenkot (Cornell Tech)</image:title>
      <image:caption>Talk: Augmenting Reality to Support People with Low Vision in Daily Tasks (Watch) Abstract: How can advances in computer vision and augmented reality help people with visual impairments? In my research, I study the experiences of people with visual impairments and design applications to help them overcome challenges. I’ll present two augmented reality applications that my students and I designed for people with low vision, who have a visual impairment that falls short of blindness. The first application helps the user find products in a grocery store and the second helps her navigate through the built environment. Both applications show how computer vision and AR can be leveraged to augment the user’s perception and support fundamental visual tasks. Bio: Shiri Azenkot is an Assistant Professor at the Jacobs Technion-Cornell Institute at Cornell Tech and in the Information Science field at Cornell University. She is also on the faculty at Technion – Israel Institute of Technology. She is broadly interested in human-computer interaction and accessibility. Professor Azenkot’s research focuses on enabling people with disabilities to have equal access to information via mobile and wearable devices. She received a Ph.D. in Computer Science from the University of Washington in 2014, where she was awarded the Graduate School Medal, an NSF Graduate Research Fellowship, and an AT&amp;T Labs Graduate Fellowship. She also holds a B.A. in Computer Science from Pomona College.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591925507004-HW22T641C6AGGIEABH9R/shortbreak.jpg</image:loc>
      <image:title>Gallery: 2020 Program Items - SHORT BREAK</image:title>
      <image:caption />
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591924030313-Q06LELWCWJPGX4PM7MSY/image-asset.jpeg</image:loc>
      <image:title>Gallery: 2020 Program Items - Steve Seitz (University of Washington, Google)</image:title>
      <image:caption>Talk: Slow Glass (Watch) Abstract: Wouldn’t it be fascinating to be in the same room as Abraham Lincoln, visit Thomas Edison in his laboratory, or step onto the streets of New York a hundred years ago? We explore this thought experiment, by tracing ideas from science fiction through newly available data sources that may facilitate this goal. Bio: Steve Seitz is Robert E. Dinning Professor in the Allen School at the University of Washington. He is also a Director on Google's Daydream team, where he leads teleportation efforts including Google Jump and Cardboard Camera. He received his B.A. in computer science and mathematics at the University of California, Berkeley in 1991 and his Ph.D. in computer sciences at the University of Wisconsin in 1997. Following his doctoral work, he did a postdoc at Microsoft Research, and then a couple years as Assistant Professor in the Robotics Institute at Carnegie Mellon University. He joined the faculty at the University of Washington in July 2000. His co-authored papers have won the David Marr Prize (twice) at ICCV, and the CVPR 2015 best paper award. He received an NSF Career Award, and ONR Young Investigator Award, an Alfred P. Sloan Fellowship, and is an IEEE Fellow and an ACM Fellow. His work on Photo Tourism (joint with Noah Snavely and Rick Szeliski) formed the basis of Microsoft's Photosynth technology. Professor Seitz is interested in problems in 3D computer vision and computer graphics, and their application to virtual and augmented reality.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591927145715-T0IQNEDVBFDK4CVNP7OW/image-asset.jpeg</image:loc>
      <image:title>Gallery: 2020 Program Items - Flora Tasse (Streem)</image:title>
      <image:caption>Talk: Computer Vision For Remote AR (Watch) Abstract: Remote AR unlocks a new set of experiences and use cases for customers. One use case in particular is remote collaboration where an expert in a different location can help an onsite customer with a task in the customer environment. Streem focuses precisely on this, and enable improved customer experiences with AR-supported video tools. This talk will dive into how Remote AR is used to unlock a new type of customer engagement, and the Computer Vision research problems that support these experiences. Bio: Flora is the Head of CV/AR at Streem. She specialises in AI applied to Computer Graphics and Vision problems faced in AR/VR. Her team at Streem is making the mobile phone's camera more intelligent, by building AI agents that can understand images/videos and augment them with relevant interactive virtual content. She joined Streem after it acquired her startup Selerio, which was spun out of her PhD work at Cambridge University. At Cambridge, Flora’s research focused on 3D shape retrieval using different query types such as 3D models, images/sketches and range scans. This work was awarded the 2013 Google Doctoral Fellowship in Computer Graphics and published in various top-tier venues, including ICCV and SIGGRAPH Asia. She served on several international program committees such as ICLR and Eurographics. Notably she was Paper Chair of the 2019 Black in AI workshop, co-located with NeurIPS. She was recently named among the Rework Top 30 UK Women in AI and appeared on Computer Weekly Most Influential women in UK Tech longlist.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591925567490-ISELVXCYN2UQUFQV2KDU/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - PANEL DISCUSSION #1</image:title>
      <image:caption>Moderators: Andrew Rabinovich (Magic Leap), Serge Belongie (Cornell) Panelists: Jamie Shotton, Shiri Azenkot, Steve Seitz, Flora Ponjou Tasse (Watch)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591925765552-9DQ5I6LYRPLL3ZGPZB44/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - LUNCH BREAK</image:title>
      <image:caption />
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591925817661-IWQX5EJ0FULR81UZPY4X/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - SPOTLIGHT VIDEOS</image:title>
      <image:caption>Playback of video submissions, see papers for full submissions. Moderator: Harald Haraldsson (Cornell Tech) Watch all videos here (YouTube playlist)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591925927956-7IS2QFS8RVVOZS02L3ML/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - VIRTUAL POSTER SESSIONS</image:title>
      <image:caption>The Virtual Poster Sessions will take place on Discord, Friday June 19, 2020. Moderator: Harald Haraldsson (Cornell Tech) See here for details.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591921184643-2HCCANII8PZ0EHIS6XJ4/Michael%2BB%252BW.jpg</image:loc>
      <image:title>Gallery: 2020 Program Items - Michael Abrash (Facebook)</image:title>
      <image:caption>Talk: Computer Vision for the Future of Social Presence (Watch) Bio: Michael Abrash is the Chief Scientist of Facebook Reality Labs, a research laboratory that brings together a world-class R&amp;D team of scientists, developers and engineers to build the future of connection within virtual and augmented reality. He was graphics lead for the first two versions of Windows NT, teamed with John Carmack on Quake, worked on the first two versions of Microsoft’s Xbox, and helped develop virtual reality at Valve. He is also the author of several books, including Michael Abrash’s Programming Black Book.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591927488901-NX2158D3GZV9YLC2HVMZ/image-asset.jpeg</image:loc>
      <image:title>Gallery: 2020 Program Items - Cristian Sminchisescu (Google, Lund University)</image:title>
      <image:caption>Talk: GHUM, Interactions, and Active Human Sensing Bio: Cristian Sminchisescu is a Research Scientist leading a team at Google, and a Professor at Lund University. He has obtained a doctorate in computer science and applied mathematics with focus on imaging, vision and robotics at INRIA, under an Eiffel excellence fellowship of the French Ministry of Foreign Affairs, and has done postdoctoral research in the Artificial intelligence Laboratory at the University of Toronto. He has held a Professor equivalent title at the Romanian Academy and a Professor rank, status appointment at Toronto, and has advised research at both institutions. During 2004-07, he was a faculty member at the Toyota Technological Institute at the University of Chicago, and later on the Faculty of the Institute for Numerical Simulation in the Mathematics Department at Bonn University. Cristian Sminchisescu regularly serves as an Area Chair for computer vision and machine learning conferences (CVPR, ECCV, ICCV, AAAI, NeurIPS), as a Program Chair for ECCV 2018, and an Associate Editor of IEEE Transactions for Pattern Analysis and Machine Intelligence (PAMI) and the International Journal of Computer Vision (IJCV). Over time, his work has been funded by the US National Science Foundation, the Romanian Science Foundation, the German Science Foundation, the Swedish Science Foundation, the European Commission under a Marie Curie Excellence Grant, and the European Research Council under an ERC Consolidator Grant. Cristian Sminchisescu's research interests are in the area of computer vision (3d human sensing, reconstruction and recognition) and machine learning (optimization and sampling algorithms, kernel methods and deep learning). The visual recognition methodology developed in his group was a winner of the PASCAL VOC object segmentation and labeling challenge during 2009-12, as well as the Reconstruction Meets Recognition Challenge (RMRC) 2013-14. 
His work on deep learning of graph matching has received the best paper award honorable mention at CVPR 2018.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591927411267-BQ6GAJTFV2P8GAA312U8/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - SHORT BREAK</image:title>
      <image:caption />
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591921389688-OLF9VA8TEBG78AI5ELMW/Carol%2BO%27Sullivan.jpeg</image:loc>
      <image:title>Gallery: 2020 Program Items - Carol O’Sullivan (Trinity College, Dublin)</image:title>
      <image:caption>Talk: Compelling physical interactions in Mixed Reality (Watch) Abstract: The problem of simulating virtual entities within a dynamically changing real world remains a significant open challenge. We are still far from being able to emulate the rich, physical experience of interacting with real objects, environments and people. Mixed Reality is therefore still a developing field, with most research focussed on developing computer vision and AI technologies for capturing and analysing video of the real world and augmenting it with virtual content. There is now a strong impetus and opportunity to abstract beyond the technological considerations and to consider the convergence of human perception of causality, computer vision, motion capture, computer animation, artificial intelligence and other related fields to deliver compelling physical interactions in Mixed Reality. In this talk I will present some ideas and recent research towards this goal. Bio: Carol O'Sullivan is the Professor of Visual Computing in Trinity College Dublin. From 2013-2016 she was a Senior Research Scientist at Disney Research in Los Angeles, and spent a sabbatical year as Visiting Professor in Seoul National University from 2012-2013. Her research interests include graphics, AR/VR, perception, Computer Animation, Crowd and Human simulation. She has been a member of many editorial boards and international program committees (including ACM SIGGRAPH and Eurographics), and has served as Editor in Chief for the ACM Transactions on Applied Perception (TAP) from 2006-2012. She has been program or general chair for several conferences, including the annual Eurographics conference, the ACM Symposium on Computer Animation, and the Courses Chair for ACM SIGGRAPH Asia 2018. Prior to her PhD studies, she spent several years in industry working in Software Development. She was elected a fellow of Trinity College in 2003 and of Eurographics in 2007.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591927796086-ZG2SWZSZ75M9CFZPAHVK/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - Ken Perlin (New York University)</image:title>
      <image:caption>Talk: How to Build a Holodeck (Watch) Abstract: In the age of COVID-19 it is more clear than ever that there is a compelling need for better remote collaboration. Fortunately a number of technologies are starting to converge which will allow us to take such collaborations to a whole new level. Imagine that when you join an on-line meeting you are present with your entire body, and that you can see and hear other people as though you are all in the same room. There are many challenges to realizing this vision properly. The NYU Future Reality Lab and its collaborators are working on many of them. This talk will give an overview of many of the key areas of research, including how to guarantee universal accessibility, user privacy and rights management, low latency networking, design and construction of shared virtual worlds, correct rendering of spatial audio, biometric sensing, and a radical rethinking of user interface design. Bio: Ken Perlin, a professor in the Department of Computer Science at New York University, directs the Future Reality Lab, and is a participating faculty member at NYU MAGNET. His research interests include future reality, computer graphics and animation, user interfaces and education. He is chief scientist at Parallux, Tactonic Technologies and Autotoon. He is an advisor for High Fidelity and a Fellow of the National Academy of Inventors. 
He received an Academy Award for Technical Achievement from the Academy of Motion Picture Arts and Sciences for his noise and turbulence procedural texturing techniques, which are widely used in feature films and television, as well as membership in the ACM/SIGGRAPH Academy, the 2020 New York Visual Effects Society Empire Award, the 2008 ACM/SIGGRAPH Computer Graphics Achievement Award, the TrapCode award for achievement in computer graphics research, the NYC Mayor's award for excellence in Science and Technology and the Sokol award for outstanding Science faculty at NYU, and a Presidential Young Investigator Award from the National Science Foundation. He serves on the Advisory Board for the Centre for Digital Media at GNWC. Previously he served on the program committee of the AAAS, was external examiner for the Interactive Digital Media program at Trinity College, general chair of the UIST2010 conference, directed the NYU Center for Advanced Technology and Games for Learning Institute, and has been a featured artist at the Whitney Museum of American Art. He received his Ph.D. in Computer Science from NYU, and a B.A. in theoretical mathematics from Harvard. Before working at NYU he was Head of Software Development at R/GREENBERG Associates in New York, NY. Prior to that he was the System Architect for computer generated animation at MAGI, where he worked on TRON.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591927991976-A2T68NV53XKR4T9O6ME6/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - PANEL DISCUSSION #2</image:title>
      <image:caption>Moderators: Sofien Bouaziz (Google), Matt Uyttendaele (Facebook) Panelists: Michael Abrash, Cristian Sminchisescu, Carol O’Sullivan, Ken Perlin (Watch)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1591928178255-BOOWXYS0RUY5F9WCD6EL/image-asset.png</image:loc>
      <image:title>Gallery: 2020 Program Items - FINAL REMARKS</image:title>
      <image:caption>Fernando De la Torre (Facebook, CMU)</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/gallery-keynote-videos</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-06-19</lastmod>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/t/5eecff3a6f5c3c65afbcd438/1592374786720/</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - YouTube</image:title>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/5ee9b5d96bf6343b4c5f5f4f/5ee9b5eff7dd700403de225f/1592374786720/</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - YouTube</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592502909035-MTQ93X3ID71Q07F9JT8T/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Jamie Shotton - Human Understanding for HoloLens (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592502928405-XRIEYRPI1CJHL4H4B8NN/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Shiri Azenkot - Augmenting Reality to Support People with Low Vision in Daily Tasks (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592503687877-F9VTQ87TKUPU1DQNRU0B/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Steve Seitz - Slow Glass (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592503005947-KE6CUSQWSZSCV9QKZC0K/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Flora Tasse - Computer Vision For Remote AR (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592590074300-3L2ZIC63T8Q97538444X/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Panel Discussion #1 (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592503034036-3NRM5S026TBPJTL1ZOIZ/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Michael Abrash - Computer Vision for the Future of Social Presence (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592503068706-99IIE2LQ0A85YRWGO9UI/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Carol O'Sullivan - Compelling physical interactions in Mixed Reality (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592503097576-8VOBGRRQ661KWZN0WWM6/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Ken Perlin - How to Build a Holodeck (CV4ARVR 2020)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592590119466-51SC6KLDRGVZ1188XRHX/image-asset.jpeg</image:loc>
      <image:title>Gallery: Keynote and Panel Videos (CV4ARVR 2020) - Panel Discussion #2 (CV4ARVR 2020)</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/indext-test</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-07-09</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2021/sponsors</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-06-02</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>2021 Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>2021 Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>2021 Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/image-asset.png</image:loc>
      <image:title>2021 Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/image-asset.png</image:loc>
      <image:title>2021 Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/image-asset.png</image:loc>
      <image:title>2021 Sponsors</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/2021-speakers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-06-20</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469395398-00167NOKDC66DX38XRDL/transparent.png</image:loc>
      <image:title>2021 Speakers - Opening Remarks</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469395398-00167NOKDC66DX38XRDL/transparent.png</image:loc>
      <image:title>2021 Speakers - Opening Remarks</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1623791205103-AR4CZKP96U8UE95OQ9H8/Rachel+McDonnell.jpg</image:loc>
      <image:title>2021 Speakers - Rachel McDonnell (Trinity College Dublin)</image:title>
      <image:caption>Talk: Should we tread softly across the uncanny valley? Abstract: In recent days, virtual humans are increasing in popularity across many different domains. Besides their traditional use in gaming, and VFX, we are now seeing a huge increase in their use in newer applications such as AR/VR, video conferencing, social media influencers and youtubers, virtual assistants, or for therapy/learning. In the next 5 years, we will become much more accustomed to conversing with virtual humans for a range of tasks. However, research on the perception of virtual humans is not advancing as quickly, and we are lacking knowledge on how humans really perceive these types of interactions - do they register as real human encounters or an interaction with a robot? Achieving photorealism will surely make this distinction more difficult in the future. In this talk, I will discuss some of our recent research on the perception of virtual and augmented humans, focusing on the effect of photorealism. Bio: Rachel McDonnell is an Associate Professor of Creative Technologies at Trinity College Dublin, and a principal investigator with ADAPT, Trinity’s Centre for AI-driven Digital Content Technology. She combines research in cutting-edge computer graphics and investigating the perception of virtual characters to both deepen our understanding of how virtual humans are perceived, and directly provide new algorithms and guidelines for industry developers on where to focus their efforts. She has published over 100 papers in conferences and journals in her field, including many top-tier publications at venues such as SIGGRAPH, Eurographics, TOCHI, and IEEE TVCG, etc. She has served as Associate Editor journals such as ACM Transactions on Applied Perception and Computer Graphics Forum, and a regular member of many international program committees (including ACM SIGGRAPH and Eurographics). She was recently elected a Fellow of Trinity College Dublin.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1625698157336-KF9IK6WT5RTBLSCVY3V1/grundmann.jpg</image:loc>
      <image:title>2021 Speakers - Matthias Grundmann (Google)</image:title>
      <image:caption>Talk: On-device ML solutions for Mobile and Web Abstract: In this talk, I will present several on-device Machine Learning (ML) solutions for mobile and web that are powering a wide range of impactful Google Products. On-device ML has major benefits enabling low-latency, offline and privacy-preserving approaches. However, to ship these solutions in production, we need to overcome substantial technical challenges to deliver on-device ML in real-time and with low-latency. Once solved, our solutions power applications like background replacement and light adjustment in Google Meet, AR effects in YouTube and Duo, gesture controls of devices and view-finder tracking for Google Lens and Translate. In this talk, I will cover some of the core-recipes behind Google’s on-device ML solutions, from model design over enabling ML solutions infrastructure (MediaPipe) to on-device ML inference acceleration. In particular we will be covering video segmentation, face meshes and iris tracking, hand tracking for gesture control and body tracking to power 3D avatars. The covered solutions are also available to the research and developer community via MediaPipe, —an open source cross platform framework for building customizable ML pipelines for mobile, web, desktop and python. Bio: Matthias Grundmann is a Director of Research at Google working in the area of Machine Learning, Computer Vision and Computational Video. He is leading a vertical team of ~40 Applied ML and Software Engineers with focus on Machine Learning solutions for Live ML (low-latency, on-device and real-time). His team develops high-quality, cross-platform ML solutions (MediaPipe) driven by GPU/CPU accelerated ML inference (TFLite GPU and XNNPack) for mobile and web. 
Among the wide portfolio of technologies his team develops are solutions for hand and body tracking, high-fidelity facial geometry and iris estimation, video segmentation for Google Meet and YouTube, 2D object and calibration-free 6 DOF camera tracking, 3D object detection, Motion Photos and Live Photo stabilization. Matthias received his Ph.D. from the Georgia Institute of Technology in 2013 for his work on Computational Video with focus on Video Stabilization and Rolling Shutter removal for YouTube. His work on Rolling Shutter removal won the best paper award at ICCP, 2012. He was the recipient of the 2011 Ph.D. Google Fellowship in Computer Vision.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469441126-L5E3BZ0C1502CE0C3A4O/transparent.png</image:loc>
      <image:title>2021 Speakers - Short Break</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626386804209-4B17A5PK21MTPH2CSJKA/kaan_aksit.png</image:loc>
      <image:title>2021 Speakers - Kaan Akşit (University College London)</image:title>
      <image:caption>Talk: Towards Unifying Display Experiences with Computer-Generated Holography Abstract: From smartphones to desktop computers, display technologies play a crucial role in shaping how we exchange visual information. The most significant challenges in display technologies are allowing most users to access a more extensive set of comfortable visual experiences and generating authentic three-dimensional visual experiences inherent to the human visual system. This talk's overarching aim is to formulate a new research ground to address these issues by inventing and co-designing proof-of-concept hardware and software for the future's display. A common consensus among academia and industry is that a genuine holographic display representing light fields is the future's immersive display. Hence, computer-generated holography will be at the centre of focus in this talk. Bio: Kaan Akşit is an Associate Professor at University College London. Kaan received his PhD degree in electrical engineering at Koç University, Turkey, in 2014, his M.Sc. degree in electrical power engineering from RWTH Aachen University, Germany, in 2010, and his B.S. degree in electrical engineering from Istanbul Technical University, Turkey, in 2007. Kaan researches the intersection of light and computation, including computational approaches in imaging, fabrication and displays. Kaan’s research works are most known among the optics and graphic community for his contributions to display technologies dedicated to virtual reality, augmented reality, and three-dimensional displays with glasses and without glasses. He worked as a research intern in Philips Research, the Netherlands, and Disney Research, Switzerland, in 2009 and 2013, respectively. In addition, he was a scientist at NVIDIA, the USA, between 2014 and 2020. 
He is the recipient of Emerging Technologies best in show awards in SIGGRAPH 2019 and SIGGRAPH 2018, DCEXPO special prize in SIGGRAPH 2017, and among the best papers in IEEE VR 2021, IEEE VR 2019, ISMAR 2018, and IEEE VR 2017.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1624542762219-WMNUPAZQDI7Q6TC77SLU/PerozChristophe.jpg</image:loc>
      <image:title>2021 Speakers - Christophe Peroz (Sony)</image:title>
      <image:caption>Talk: What can AR/MR display do and not do in 2021? Bio: Christophe Peroz has recently moved from the Bay Area to Tokyo to join Sony Group R&amp;D center to work on the development of xR display. Christophe has led R&amp;D projects in industry (Magic Leap, aBeam Tech, Saint Gobain) and academy (Berkeley Lab, CNRS) and has been involved in the development of several products from early-stage concept to commercialization. Since 2015, he is focusing on the development of xR display to enable the next technological revolution. Christophe is co-author of 100+ publications and patents and received his PhD in Applied Physics from Grenoble Alpes University, France. He serves on several scientific committees and is co-chair of SPIE AR/VR/MR conference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1624542905648-LCI0IBX90S3CSWMRQZF0/Hirohi+Mukawa_300dpi.JPG</image:loc>
      <image:title>2021 Speakers - Hiroshi Mukawa (Sony)</image:title>
      <image:caption>Talk: What can AR/MR display do and not do in 2021? Bio: Hiroshi Mukawa is currently responsible for AR/MR HMD technology development at Sony Group R&amp;D center as a corporate distinguished engineer and heading the AR display module business at Sony Semiconductor Solutions Corporation as a general manager. In 2004, he started his research on optical see-through HMDs and has been leading all the AR eyewear product development and commercialization including the world’s first waveguide-based optical see-through closed caption glasses in 2012. He has over 150 patent families related to optics and mechanics in the fields of an AR/MR HMD and optical disc storage. He serves as an executive committee member of the SPIE AR/VR/MR conference. He received M.S. degrees in electrical engineering and physical engineering from Stanford University and Kyoto University respectively.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469536060-I5V07S4S0J1EK9XPJ89V/transparent.png</image:loc>
      <image:title>2021 Speakers - Panel Discussion #1</image:title>
      <image:caption>Rachel McDonnell (Trinity College Dublin), Matthias Grundmann (Google), Kaan Akşit (University College London), Christophe Peroz (Sony), Hiroshi Mukawa (Sony)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469599050-0UQTWYVHQSK2HRHJWQXX/transparent.png</image:loc>
      <image:title>2021 Speakers - Lunch Break</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469599050-0UQTWYVHQSK2HRHJWQXX/transparent.png</image:loc>
      <image:title>2021 Speakers - Spotlight Videos</image:title>
      <image:caption>Playback of Spotlight Videos. Moderator: Harald Haraldsson (Cornell Tech) Submit questions to the authors via Zoom Q&amp;A. See the videos on YouTube here.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469700323-RKF2DIPBZUGNG7XB5YM1/transparent.png</image:loc>
      <image:title>2021 Speakers - Virtual Poster Session (Discord)</image:title>
      <image:caption>Meet the authors on Discord. Moderator: Harald Haraldsson (Cornell Tech) Details here.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1623679952567-L08ILPMQXXUZKOXNG49H/Ira_Kemelmacher.jpg</image:loc>
      <image:title>2021 Speakers - Ira Kemelmacher-Shlizerman (University of Washington)</image:title>
      <image:caption>Prof. Kemelmacher-Shlizerman is a Scientist and Entrepreneur. Ira's interests are in the intersection of computer vision, computer graphics, and learning. She is an Associate Professor at the Allen School University of Washington, Director of the UW Reality Lab, and lead for a moonshot project at Google. Previously she founded a startup Dreambit that was acquired by Facebook in 2016, and spent two years at Facebook as Research Scientist building products between 2016-2018. Before that in 2011 Ira tech transferred product Face Movies to Google. Ira's Ph.D is in computer science and applied mathematics from the Weizmann Institute of Science. Her works were awarded the Google faculty award, Madrona prize, the GeekWire Innovation of the Year Award, 2016, selected to the covers of CACM and SIGGRAPH, and frequently covered by most national and international media. She has been serving as area chair and technical committee of both CVPR and SIGGRAPH, and part of Expert Network, LDV capital. Ira teaches computer vision, computer graphics, and the popular novel AR/VR class.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1632855031802-39GEU2NI4OYGXV3ZA6U9/Danny+Lange+%281%29.jpg</image:loc>
      <image:title>2021 Speakers - Sujoy Ganguly (Unity)</image:title>
      <image:caption>Talk: Customizable Computer Vision Expands Data Access Without Compromising Privacy Abstract: In recent years, computer vision has made huge strides, helped by large-scale labeled datasets. However, these datasets had no guarantees or analysis of diversity. Additionally, privacy concerns may limit the ability to collect more data. These problems are particularly acute in human-centric computer vision for AR/VR applications. An emerging alternative to real-world data that alleviates some of these issues is synthetic data. However, the creation of synthetic data generators is incredibly challenging and prevents researchers from exploring their usefulness. To promote research into the use of synthetic data, we release a set of data generators for computer vision. We found that pre-training a network using synthetic data and fine-tuning on real-world target data results in models that outperform models trained with the real data alone. Furthermore, we find remarkable gains when limited real-world data is available. Join us to learn how these freely available data generators should enable a wide range of research into the emerging field of simulation to real transfer learning for computer vision. Bio: Sujoy Ganguly is the Head of Applied Machine Learning Research at Unity Technologies. Sujoy earned his Ph.D. in Applied Mathematics and Theoretical Physics from the University of Cambridge, understanding collective dynamics and transport phenomena in biological systems. After his Ph.D. Sujoy was a postdoctoral fellow at Yale University working in computational neuroscience. He has many years of industrial experience bringing the next generation of AI-driven technologies to market while publishing papers at major AI conferences. At Unity Technologies, he is leading efforts in using simulated data to train AI that performs real-world tasks.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469795606-OHAP34Q3WOR11CMNLJIF/transparent.png</image:loc>
      <image:title>2021 Speakers - Short Break</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1624543352111-0ZMDFJ8DCWM0627QXSP5/grauman.jpg</image:loc>
      <image:title>2021 Speakers - Kristen Grauman (UT Austin)</image:title>
      <image:caption>Talk: First-Person Video for Interaction Learning Bio: Kristen Grauman is a Professor in the Department of Computer Science at the University of Texas at Austin and a Research Scientist in Facebook AI Research (FAIR). Her research in computer vision and machine learning focuses on video, visual recognition, and action for perception or embodied AI. Before joining UT-Austin in 2007, she received her Ph.D. at MIT. She is an IEEE Fellow, AAAI Fellow, Sloan Fellow, a Microsoft Research New Faculty Fellow, and a recipient of NSF CAREER and ONR Young Investigator awards, the PAMI Young Researcher Award in 2013, the 2013 Computers and Thought Award from the International Joint Conference on Artificial Intelligence (IJCAI), the Presidential Early Career Award for Scientists and Engineers (PECASE) in 2013. She was inducted into the UT Academy of Distinguished Teachers in 2017. She and her collaborators have been recognized with several Best Paper awards in computer vision, including a 2011 Marr Prize and a 2017 Helmholtz Prize (test of time award). She currently serves as an Associate Editor-in-Chief for the Transactions on Pattern Analysis and Machine Intelligence (PAMI) and as an Editorial Board member for the International Journal of Computer Vision (IJCV). She previously served as a Program Chair of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2015 and a Program Chair of Neural Information Processing Systems (NeurIPS) 2018 and will serve as a Program Chair of the IEEE International Conference on Computer Vision (ICCV) 2023.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1623679840033-CIGI5FTW8E8755ZZ7TL8/Evan+Nisselson.jpg</image:loc>
      <image:title>2021 Speakers - Evan Nisselson (LDV Capital)</image:title>
      <image:caption>Talk: Investment Trends &amp; Opportunities from Content Tools, to Digital Beings and the Metaverse? Abstract: We are witnessing a massive shift in content creation. By 2027, visual tech tools will automate the technical skills required today for content creation and monetization. They will power the rise of the metaverse. We look forward to hearing your insights, learning about your startups, and reading your research papers on how businesses are addressing these challenges and opportunities. https://www.ldv.co/ Bio: Evan is General Partner at LDV Capital which invests in people building businesses powered by visual technologies from computer vision, machine learning and artificial intelligence that analyze visual data. We invest at the earliest stages of a company typically with a prototype or some initial customer validation. Some example visual technology verticals: Photonics, Autonomous Vehicles, Mapping, Robotics, Food/Agriculture, Augmented Reality, Logistics, Manufacturing, Search, Security, Entertainment, Healthcare and much more. The unique LDV Capital platform includes an annual LDV Vision Summit, LDV Community, annual LDV Insights reports and extensive expert network. Evan is a serial entrepreneur, professional photographer and digital media expert since the early 1990's. His international expertise ranges from building four visual technology businesses to assisting technology startups in raising capital, business development, marketing and product development. He is a frequent speaker, moderator and master of ceremonies at technology conferences.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469877361-IKHAVK48DNJ5CNVNLYZ0/transparent.png</image:loc>
      <image:title>2021 Speakers - Panel Discussion #2</image:title>
      <image:caption>Sujoy Ganguly (Unity) Kristen Grauman (UT Austin) Evan Nisselson (LDV Capital)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1626469936442-PM3I390B3N26HMZ8JKYS/transparent.png</image:loc>
      <image:title>2021 Speakers - Final Remarks</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/xrc-leadership</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-05-23</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1639327944382-TI7M06Z8YBOSFUIOCH4X/harald.jpg</image:loc>
      <image:title>XRC Leadership - Harald Haraldsson</image:title>
      <image:caption>Director, XR Collaboratory</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1639327944382-TI7M06Z8YBOSFUIOCH4X/harald.jpg</image:loc>
      <image:title>XRC Leadership - Harald Haraldsson</image:title>
      <image:caption>Director, XR Collaboratory</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1670883684486-WLW8V65503L3YSXGZSO0/bil.jpg</image:loc>
      <image:title>XRC Leadership - William J Leon</image:title>
      <image:caption>XR Prototyper, XR Collaboratory</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1684856446163-N8X57TRI61K688MKWDSB/image-asset.jpeg</image:loc>
      <image:title>XRC Leadership - Sky Rolnick</image:title>
      <image:caption>Software Engineer, XR Collaboratory</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1627939365622-54GR5JOK15N45K770GSC/deborah_cropped_version2.png</image:loc>
      <image:title>XRC Leadership - Deborah Estrin</image:title>
      <image:caption>Affiliated Faculty, Professor, Computer Science</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1634177779056-326784UW8FIUG9NXHD48/Belongie_Serge_011hr5-1.jpg</image:loc>
      <image:title>XRC Leadership - Serge Belongie</image:title>
      <image:caption>Affiliated Faculty, Visiting Professor, Computer Science</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1634177740475-TVLDIHEUUBMB8P8QQR5M/abe.jpg</image:loc>
      <image:title>XRC Leadership - Abe Davis</image:title>
      <image:caption>Affiliated Faculty, Assistant Professor, Computer Science</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/2021-keynote-and-panel-videos</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-12-05</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638729287703-KMWIUJZU9SQ7UIGJ2TI6/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Rachel McDonnell - Should we tread softly across the uncanny valley? (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638729287703-KMWIUJZU9SQ7UIGJ2TI6/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Rachel McDonnell - Should we tread softly across the uncanny valley? (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728570726-MJ3ALZI3Y2HI47YUZQF5/image-asset.octet-stream</image:loc>
      <image:title>2021 Keynote and Panel Videos</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728726522-UE6V7H6IV9916HS723AD/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Matthias Grundmann - On-device ML solutions for Mobile and Web (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728767437-NAF1Z4S3Q7Z023HCEIQ3/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Kaan Akşit - Towards Unifying Display Experiences with Computer-Generated Holography (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728796506-II5D0IJ9QC2XM46QTX9B/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Christophe Peroz and Hiroshi Mukawa - What can AR/MR display do and not do in 2021? (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728853994-B8VLMUZX0XDYP3B51LT1/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Panel Discussion #1 (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728888603-0PKDLZ7ES3C6KCK99KWV/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Sujoy Ganguly - Customizable Computer Vision Expands Data Access Without Compromising Privacy</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728947454-PYF0JSY0I4JHEMXP3E74/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Kristen Grauman - First-Person Video for Interaction Learning (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638728981304-RRLZOIRBX9UR1UU3Y8EF/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Evan Nisselson - Investment Trends &amp; Opportunities (CV4ARVR 2021)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1638729013086-Y378P5ZW1IY1IRATVZVE/image-asset.jpeg</image:loc>
      <image:title>2021 Keynote and Panel Videos - Panel Discussion #2 (CV4ARVR 2021)</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022/sponsors</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-05-04</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1644431183498-SHXZG5B8QIAW46NLT5B5/Meta_lockup_positive+primary_RGB.jpg</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR 2022 - Sponsors</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/cv4arvr-2022-speakers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-06-21</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655304356253-Y36SSQU95XAQS4PMNOYJ/image-asset.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Opening Remarks - Sofien Bouaziz (Meta)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655304356253-Y36SSQU95XAQS4PMNOYJ/image-asset.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Opening Remarks - Sofien Bouaziz (Meta)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655388984954-JUGDBRASWE1IOI65K36E/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Philip Rosedale (Linden Lab)</image:title>
      <image:caption>(Remote) Talk Title: Computer Vision for Avatar Communication Bio: Philip Rosedale is the Founder of Linden Lab, parent company of Second Life, an open-ended, Internet-connected virtual world and pioneering metaverse. Following Second Life, he worked on several projects related to distributed work and computing. Excited by innovations in these areas and the proliferation of new VR-enabling devices, he re-entered the virtual worlds space in 2013, co-founding High Fidelity, a company devoted to exploring the future of next-generation shared virtual reality. Philip rejoined Second Life in 2022, as Strategic Advisor, focused on helping to shape and build a better metaverse. Prior to Linden Lab, Philip created an innovative Internet video conferencing product ("FreeVue"), which was later acquired by RealNetworks, where he went on to become Vice President and CTO.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1652278254111-ADJCMMDGM7QJHNF0QR2P/michael-kass.jpg</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Michael Kass (NVIDIA)</image:title>
      <image:caption>(Remote) Talk Title: Computer Vision and the Metaverse Abstract: Computer vision technology has advanced tremendously in recent years, but it is of limited use by itself. Its full value is only realized when it becomes well integrated into larger user solutions in the real or virtual world, involving training, inference, performance evaluation or other aspects. Custom integrations are always possible, but if we can integrate computer vision into standardized infrastructure, it will become vastly simpler to deploy. Our best example of a universal platform for integrating varied information technologies together is the world-wide web. Unfortunately the current web has its feet solidly planted in the 2D world. A variety of efforts have been made to add 3D capabilities to the web, but these have failed to address the central issue. 3D is fundamentally harder than 2D. So in order to create the solution we really want, we should start with the needs and requirements of 3D, and then integrate 2D into a proper 3D web instead of the other way around. Here, we explore what that proper 3D web should look like, and how it can act as the foundation of the metaverse. The full vision remains to be realized, but we will show some of the concrete steps that NVIDIA has already taken in this direction with the Omniverse platform. Bio: Michael Kass is a senior distinguished engineer at NVIDIA and the architect of NVIDIA Omniverse, NVIDIA's platform for collaborative 3D content creation and digital twins. 
In 2005, Kass received a Scientific and Technical Academy Award for “pioneering work in physically-based computer-generated techniques used to simulate realistic cloth in motion pictures.” In 2009, he received the ACM Computer Graphics Achievement Award for "his extensive and significant contributions to computer graphics, ranging from image processing to animation to modeling and in particular for his introduction of optimization techniques as a fundamental tool in graphics." And in 2017, the ACM honored him as an ACM Fellow “for contributions to computer vision and computer graphics, particularly optimization and simulation.” Kass has been granted over 30 U.S. patents, and was honored in 2018 as Inventor of the Year by the NY Intellectual Property Law Association. Before switching to computer graphics, he had an extensive career in computer vision. His Helmholtz-award winning computer vision paper “Snakes: Active contour models” is one of the most cited papers in computer science with over 25k citations. Kass holds a B.A. from Princeton, an M.S. from M.I.T. and a Ph.D. from Stanford.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420237152-V5P53TT6332BOCRDFKV2/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - AM Break</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655480687997-9ZUOAU0NA5UEQS7DCTJK/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Erica Kaitz (Amelia Virtual Care)</image:title>
      <image:caption>(Remote) Talk Title: Virtual Reality and the treatment of PTSD and Behavioral Health Conditions Abstract: In this presentation I will review the value of VR in treating PTSD and in assisting patients in processing and working through behavioral health conditions. Bio: Erica Kaitz, LCSW, is the VP of Behavioral Health at Amelia Virtual Care.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1652278254565-SMFIREF1A1XSNHFPMVU4/ronald-mallet.jpg</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Ronald Mallet (Meta)</image:title>
      <image:caption>(In person) Title: Biophysical Digital Characters in the Metaverse Abstract: To be successful, the Metaverse will need to provide its users a full sense of immersion and the ability to naturally interact with the world and others around them, in ways that they feel comfortable with. It also will present the unique opportunity for users to control their appearance and express themselves in distinctive and creative ways, beyond what’s possible in the real world. In this talk we’ll explore ideas and concepts to enable such capabilities at scale, and discuss what kind of technological breakthroughs and intuitive content creation tools will be needed. Bio: Ronald Mallet is the director and founder of Meta Reality Labs Research in Sausalito &amp; Zurich, working on the future of digital humans and characters for AR/VR applications. The team's research includes biomechanical motion analysis, data-driven biophysical simulations, machine perception, and photorealistic rendering, spanning sub-disciplines in computer vision, computer graphics, and machine learning. Prior to joining Meta, he was a lead researcher at Industrial Light &amp; Magic, a Lucasfilm division, working on cutting edge technologies to deliver visual effects and digital characters for high-end feature films, including Avatar, Star Wars, Harry Potter, Pirates of the Caribbean, and many others. He received an Academy Award for Technical Achievement for his ground breaking work on markerless full body on-set motion capture. Prior to ILM, he held various research and engineering positions, including leading the award-winning MatchMover software project for 3d camera tracking.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420388359-EEPEQ7D755ZNYZHBIZA9/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Panel #1: Philip Rosedale (Linden Lab), Michael Kass (NVIDIA), Erica Kaitz (Amelia Virtual Care), Ronald Mallet (Meta)</image:title>
      <image:caption>Moderators: Andrew Rabinovich (Headroom Inc.), Serge Belongie (University of Copenhagen)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420456163-6D0DFKZBUEV88GKZ4ZIK/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Lunch Break</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420512751-TBMHGN9MEBS8IQQQW1B7/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Poster Session - Organizer: Harald Haraldsson (Cornell Tech)</image:title>
      <image:caption>For in-person attendees and authors: Poster hall, boards 8b-27b (see signs at venue). For virtual attendees and authors: Virtual poster session on Gatherly (see link on CVPR virtual site). Spotlight videos can be watched asynchronously here. To interact with authors please join the poster session. See all submissions here.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1652278255279-LXMTYNYTKTTOY12IS5MI/sergey-tulyakov.jpg</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Sergey Tulyakov (Snap Inc.)</image:title>
      <image:caption>(In person) Title: Object Digitization, Manipulation and Rendering for Immersive Experiences Abstract: The digitization, manipulation and rendering of objects requires a great deal of skill and time, substantially limiting the available immersive experiences. In our work, we build tools to simplify this process and make it intuitive. To move the needle here, there are two key questions that need to be considered: 1) how to digitize and insert objects into existing scenes and 2) how does one interactively manipulate objects within volumetric environments? We’d like to solve both in 3D. To answer the first question, we show an efficient method to neural object capture and rendering from online images. Given images with the same object in different environments and lighting conditions, our method estimates material properties making it seamless to insert neural objects into scenes. To answer the second question we present a new volumetric representation, dubbed “playable environments,” which allows one to play objects inside scenes with intuitive controls, camera and style changes, making scene manipulation akin to playing a game. Bio: Sergey Tulyakov is a Principal Research Scientist heading the Creative Vision team at Snap Inc. His work focuses on creating methods for manipulating the world via computer vision and machine learning. This includes human and object understanding, photorealistic manipulation and animation, video synthesis, prediction and retargeting. He pioneered the unsupervised image animation domain with MonkeyNet and First Order Motion Model that sparked a number of startups in the domain. His work on Interactive Video Stylization received the Best in Show Award at SIGGRAPH Real-Time Live! 2020. 
He has published 30+ top conference papers, journals and patents resulting in multiple innovative products, including Snapchat Pet Tracking, OurBaby, Real-time Neural Lenses (gender swap, baby face, aging lens, face animation) and many others. Before joining Snap Inc., Sergey was with Carnegie Mellon University, Microsoft, NVIDIA. He holds a PhD degree from the University of Trento, Italy.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420658277-RIEYMW31IBGNW8SPUFLU/WhatsApp+Image+2022-03-22+at+16.30.36.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Lourdes Agapito (University College London)</image:title>
      <image:caption>(Remote) Learning 3D Representations of the World from Images and Video Bio: Lourdes Agapito holds the position of Professor of 3D Vision at the Department of Computer Science, University College London (UCL). Her research in computer vision has consistently focused on the inference of 3D information from single images or videos acquired from a single moving camera. She received her BSc, MSc and PhD degrees from the Universidad Complutense de Madrid (Spain). In 1997 she joined the Robotics Research Group at the University of Oxford as an EU Marie Curie Postdoctoral Fellow. In 2001 she was appointed as Lecturer at the Department of Computer Science at Queen Mary University of London. From 2008 to 2014 she held an ERC Starting Grant funded by the European Research Council to focus on theoretical and practical aspects of deformable 3D reconstruction from monocular sequences. In 2013 she joined the Department of Computer Science at University College London and was promoted to full professor in 2015. She now heads the Vision and Imaging Science Group, is a founding member of the AI centre and co-director of the Centre for Doctoral Training in Foundational AI. Lourdes serves regularly as Area Chair for the top Computer Vision conferences (CVPR, ICCV, ECCV) was Program Chair for CVPR 2016 and will serve again for ICCV 2023. She was keynote speaker at ICRA 2017 and ICLR 2021. In 2017 she co-founded Synthesia, the London based synthetic media startup responsible for the AI technology behind the Malaria no More video campaign that saw David Beckham speak 9 different languages to call on world leaders to take action to defeat Malaria.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420787252-HYROY3QC27ER488TDMQJ/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - PM Break</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1652278254017-B4K6NAB1M6XIP1367LEP/angela-dai.jpg</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Angela Dai (TU Munich)</image:title>
      <image:caption>(In person) Title: Towards Commodity 3D Content Creation Abstract: With the increasing availability of high quality imaging and even depth imaging now available as commodity sensors, comes the potential to democratize 3D content creation. State-of-the-art reconstruction results from commodity RGB and RGB-D sensors have achieved impressive tracking, but reconstructions remain far from usable in practical applications such as mixed reality or content creation, since they do not match the high quality of artist-modeled 3D graphics content: models remain incomplete, unsegmented, and with low-quality texturing. In this talk, we will address these challenges: I will present a self-supervised approach to learn effective geometric priors from limited real-world 3D data, then discuss object-level understanding of from a single image, followed by realistic 3D texturing from real-world image observations. This will help to enable a closer step towards commodity 3D content creation. Bio: Angela Dai is an Assistant Professor at the Technical University of Munich where she leads the 3D AI group. Prof. Dai's research focuses on understanding how the 3D world around us can be modeled and semantically understood. Previously, she received her PhD in computer science from Stanford in 2018 and her BSE in computer science from Princeton in 2013. Her research has been recognized through a Eurographics Young Researcher Award, ZDB Junior Research Group Award, an ACM SIGGRAPH Outstanding Doctoral Dissertation Honorable Mention, as well as a Stanford Graduate Fellowship.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1654553914997-KCJGPKPYQBSEAVQ39O8M/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Omer Shapira (NVIDIA)</image:title>
      <image:caption>(Remote) Title: Hyperscale Spatial Computing: Implications of Inverting the Bandwidth Funnel Abstract: Recent advances in compute pipelines have enabled leaps in body-centered technology such as Ray-Traced Virtual Reality. Simultaneously, network bottlenecks have decreased to the point that streaming pixels directly from datacenters to HMDs is a reality. This talk explores the potential of body-centered computing at datacenter scales - what applications, experiences and new science it enables. Bio: Omer Shapira is an Engineer and Artist, leading the Omniverse Extended Reality group at NVIDIA. Omer’s work and research focuses on Virtual Reality, Human-Robot Interaction, Synthetic Data for Autonomous Systems, Haptics, and Collaborative Hyperscale Computing. Omer's work has been published and displayed at SIGGRAPH, IEEE Robosoft, CVPR, The Barbican, Tribeca Film Festival, Sundance Film Festival, Eyebeam and others. Before working at NVIDIA, Omer was Director of Virtual Reality at Fake Love (A New York Times Company), Software Engineer at Framestore and Director, Editor and Talent at Channel 10 (Israel). Omer studied Mathematics and Linguistics in Tel Aviv University and HCI in New York University.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420930453-E2PF5UVHAKW18BCPZGU7/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Panel #2: Sergey Tulyakov (Snap Inc.), Lourdes Agapito (University College London), Angela Dai (TU Munich), Omer Shapira (NVIDIA)</image:title>
      <image:caption>Moderators: Fernando De la Torre (CMU), Natalia Neverova (Meta)</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1653420974048-2KPL81P8RVANVLMZR2LS/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Speakers - Concluding Remarks</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/collaboratory/news</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-12-13</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/collaboratory/news/2022/6/6/test-title</loc>
    <changefreq>monthly</changefreq>
    <priority>0.5</priority>
    <lastmod>2022-12-13</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/initiatives</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2024-05-28</lastmod>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/t/665654fd9f7eca64aa30f9fc/1716933717108/</image:loc>
      <image:title>Initiatives - Design and Augmented Intelligence Lab (DAIL)</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4e403362486c52687b65/1716933717108/</image:loc>
      <image:title>Initiatives - Design and Augmented Intelligence Lab (DAIL)</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4dbc8396c92b0618af04/1716933744857/</image:loc>
      <image:title>Initiatives - D.U.E.T. Lab</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4d168822f504bbfc6aa9/1716933773863/</image:loc>
      <image:title>Initiatives - Graphics and Vision Group</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4e0dbe8e714fde8bbbf9/1716933762209/</image:loc>
      <image:title>Initiatives - Sabin Lab</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4cebc85c125a2abe676b/1716933785361/</image:loc>
      <image:title>Initiatives - SE(3) Computer Vision Group</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4d8fa588d251f8985a1b/1716933800385/</image:loc>
      <image:title>Initiatives - SciFi Lab</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e4926c85c125a2abdc322/1716933885973/</image:loc>
      <image:title>Initiatives - The Virtual Embodiment Lab</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e453870371a7e77c2129b/1716933816451/</image:loc>
      <image:title>Initiatives - Workshop on Computer Vision for AR/VR (CV4ARVR)</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e45870fe55d40355c35b7/1716933829802/</image:loc>
      <image:title>Initiatives - XR Access</image:title>
      <image:caption>Website</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/629e44e50f4f6f6632a737a2/629e44f1680d1a6e5cd1ee5e/1716933841724/</image:loc>
      <image:title>Initiatives - XR Collaboratory</image:title>
      <image:caption>Website</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/cv4arvr-2022-papers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-14</lastmod>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/t/62a90f358f45585cdf79ebf8/1654190738921/</image:loc>
      <image:title>CV4ARVR 2022 - Papers - A-NeRF: Articulated Neural Radiance Fields for Learning Human Shape, Appearance, and Pose</image:title>
      <image:caption>Shih-Yang Su (University of British Columbia); Frank Yu (University of British Columbia); Michael Zollhöfer (Facebook Reality Labs); Helge Rhodin (UBC) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://static1.squarespace.com/static/5c3f69e1cc8fedbc039ea739/6298f118f0e3ae4128957d10/6298f25d92c46d4ded050152/1654190738921/</image:loc>
      <image:title>CV4ARVR 2022 - Papers - A-NeRF: Articulated Neural Radiance Fields for Learning Human Shape, Appearance, and Pose</image:title>
      <image:caption>Shih-Yang Su (University of British Columbia); Frank Yu (University of British Columbia); Michael Zollhöfer (Facebook Reality Labs); Helge Rhodin (UBC) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655246634722-PUL8QJ8PKZNW1NLS494Q/image-asset.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers - A-NeRF: Articulated Neural Radiance Fields for Learning Human Shape, Appearance, and Pose</image:title>
      <image:caption>Shih-Yang Su (University of British Columbia); Frank Yu (University of British Columbia); Michael Zollhöfer (Facebook Reality Labs); Helge Rhodin (UBC) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241192217-U2KFVSHQ44PV2BA8FU73/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - AssistSR: Task-oriented Question-driven Video Segment Retrieval (CV4ARVR 2022)</image:title>
      <image:caption>Stan Weixian Lei (National University of Singapore); Yuxuan Wang (National University of Singapore); Dongxing Mao (National University of Singapore); Lingmin Ran (National University of Singapore); Difei Gao (NUS); Mike Zheng Shou (National University of Singapore) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241277694-0WZ7OO1CUFKK5L35NPO8/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - BlazePose GHUM Holistic: Real-time 3D Human Landmarks and Pose Estimation (CV4ARVR 2022)</image:title>
      <image:caption>Ivan Grishchenko (Google LLC); Valentin Bazarevsky (Google LLC); Andrei Zanfir (); Eduard Gabriel Bazavan (Google); Mihai Zanfir (Adecco/Google); Richard Yee (Google LLC); Karthik Raveendran (Google); Matsvei Zhdanovich (Google); Matthias Grundmann (Google Research); Cristian Sminchisescu (Google) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241353752-J9A7JOXSOJ94900L2WTF/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Domain Adaptive 3D Pose Augmentation for In-the-wild Human Mesh Recovery (CV4ARVR 2022)</image:title>
      <image:caption>Zhenzhen Weng (Stanford University); Kuan-Chieh Wang (Stanford University); Angjoo Kanazawa (University of California Berkeley); Serena Yeung (Stanford University) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241411317-ZHVTLMAZ7TBOPCFYQ17Y/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Editable indoor lighting estimation</image:title>
      <image:caption>Henrique Weber (Université Laval); Mathieu Garon (Depix); Jean-Francois Lalonde (Université Laval) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241580842-KMFEOZSFHA53VY2MG1AC/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Efficient Biologically Inspired Eye Tracking for Mixed Reality (CV4ARVR 2022)</image:title>
      <image:caption>Benjamin Lundell (Microsoft); Paulo R dos Santos Mendonca (Microsoft); Christopher Mei (Microsoft) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241648587-TSTYY9B3MYTK0L2VIW0K/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Efficient Heterogeneous Video Segmentation at the Edge (CV4ARVR 2022)</image:title>
      <image:caption>Jamie Menjay Lin (Google); Siargey Pisarchyk (Google LLC); Juhyun Lee (Google LLC); David Tian (Google Inc); Tingbo Hou (Google Research); Karthik Raveendran (Google); Raman Sarokin (Google LLC); George Sung (Google LLC); Trent Tolley (Google LLC); Matthias Grundmann (Google Research) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241702320-WNXNGJ603LG6TLUH3HBG/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Facial De-occlusion Network for Virtual Telepresence Systems (CV4ARVR 2022)</image:title>
      <image:caption>Surabhi Gupta (CVIT, IIIT-Hyderabad); Ashwath Shetty (CVIT, IIIT-Hyderabad); Avinash Sharma (CVIT, IIIT-Hyderabad) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241760147-8XLLC0X2Z10P6VD49QM0/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Feature Refinement to Improve High Resolution Image Inpainting (CV4ARVR 2022)</image:title>
      <image:caption>Prakhar Kulshreshtha (Geomagical Labs, Inc.); Brian Pugh (Geomagical Labs, Inc.); Salma Jiddi (Geomagical Labs) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241807503-BQHZ9GA3K4YJBI22F42Q/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Generating Diverse 3D Reconstructions from a Single Occluded Face Image (CV4ARVR 2022)</image:title>
      <image:caption>Rahul Dey (Michigan State University); Vishnu Boddeti (Michigan State University) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241878919-6PQFO7YUT5860DLNU9GK/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Guided Co-Modulated GAN for 360° Field of View Extrapolation (CV4ARVR 2022)</image:title>
      <image:caption>Mohammad Reza Karimi Dastjerdi (Université Laval); Yannick Hold-Geoffroy (Adobe Research); Jonathan Eisenmann (Adobe); Siavash Khodadadeh (University of Central Florida); Jean-Francois Lalonde (Université Laval) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241922497-4YOTSJVDQ2W8BJ8I540S/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Latents2Segments: Disentangling the Latent Space of Generative Models for Semantic Segmentation of Face Images</image:title>
      <image:caption>Snehal Singh Tomar (Indian Institute of Technology Madras); Rajagopalan N Ambasamudram (Indian Institute of Technology Madras) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655241978595-44JIQWHQODASV3L5L8HE/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Neural Volumetric Object Selection (CV4ARVR 2022)</image:title>
      <image:caption>Zhongzheng Ren (UIUC); Aseem Agarwala (Google); Bryan Russell (Adobe Research); Alexander Schwing (UIUC); Oliver Wang (Adobe Systems Inc) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655242028956-Y5SL5RD2TBEWF9LWL1TQ/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - PlanarRecon: Real-time 3D Plane Detection and Reconstruction from Posed Monocular Videos</image:title>
      <image:caption>Yiming Xie (Northeastern University); Matheus A Gadelha (University of Massachusetts Amherst); Fengting Yang (Pennsylvania State University); Xiaowei Zhou (Zhejiang University)*; Huaizu Jiang (Northeastern University) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655242193398-PIERRK1WS4OQLWUXBHCX/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Real-Time On-Device Face Restoration with Cross-Domain Distillation (CV4ARVR 2022)</image:title>
      <image:caption>Yu-Chuan Su (Google)*; Yang Zhao (Google); Xuhui Jia (Google); Andrey Vakunov (Google); Tingbo Hou (Google Research); Matthias Grundmann (Google Research) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655242243373-NBGU5WTOQ2CZ4H21U0IV/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Single-Camera 3D Head Fitting for Mixed Reality Clinical Applications (CV4ARVR 2022)</image:title>
      <image:caption>Tejas Mane (University of Pennsylvania); Aylar Bayramova (Penn Medicine); Kostas Daniilidis (University of Pennsylvania); Philippos Mordohai (Stevens Institute of Technology); Elena Bernardis (University of Pennsylvania) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655242287103-UF0B7X5E1HCAXWUM0FRE/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - SparseFormer: Attention-based Depth Completion Network (CV4ARVR 2022)</image:title>
      <image:caption>Frederik R Warburg (The Technical University of Denmark); Manuel López Antequera (Facebook); Michaël Ramamonjisoa (Ecole des Ponts) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655242337393-UUT8H291QSR2BFIIR82P/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - TernaryNeRF: Reducing the Precision of Positional Encodings (CV4ARVR 2022)</image:title>
      <image:caption>Seungyeop Kang (Seoul National University); Sungjoo Yoo (Seoul National University) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655246119608-4YT82MY37YBQDZUP378C/transparent.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Topologically-Aware Deformation Fields for Single-View 3D Reconstruction</image:title>
      <image:caption>Shivam Duggal (Carnegie Mellon University); Deepak Pathak (Carnegie Mellon University) PDF</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1655242381602-8HYAZH9T0TT5P5CSYF35/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Papers - Video Extrapolation in Space and Time (CV4ARVR 2022)</image:title>
      <image:caption>Yunzhi Zhang (Stanford University); Jiajun Wu (Stanford University) PDF</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022/keynote-and-panel-videos</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-24</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656088950070-9V2OWZK2WQYXSOF4V9AW/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Philip Rosedale - Computer Vision for Avatar Communication (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656088950070-9V2OWZK2WQYXSOF4V9AW/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Philip Rosedale - Computer Vision for Avatar Communication (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656088950054-E2AEJP895MC95753JZCF/image-asset.octet-stream</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656088997631-446I12MPHGJ2SFBUJ3K6/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Michael Kass - Computer Vision and the Metaverse (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089065257-F3VPA1IU2401H7RC0ACU/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Erica Kaitz - Virtual Reality and the treatment of PTSD and Behavioral Health Conditions (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089098059-BGUQFZV296CLWDNABS1T/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Ronald Mallet - Biophysical Digital Characters in the Metaverse (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089129693-5WHVX3D58UI3QUPWTTMX/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Panel Discussion #1 (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089159494-SAZIWJFP4LQJAX61MXFN/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Sergey Tulyakov - Object Digitization, Manipulation and Rendering for Immersive Experiences (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089184663-LLNC68QDAM8MTCTYXP3H/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Lourdes Agapito - Learning 3D Representations of the World from Images and Video (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089208448-LSA79G2FYMNUXAFNZIPY/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Angela Dai - Towards Commodity 3D Content Creation (CV4ARVR 2022)</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1656089240106-QZJ0C5SGP310CHWDVRG4/image-asset.jpeg</image:loc>
      <image:title>CV4ARVR 2022 - Keynote and Panel Videos - Omer Shapira - Hyperscale Spatial Computing: Implications of Inverting the Bandwidth Funnel (CV4ARVR 2022)</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/new-gallery-1</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-12-14</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1671044623487-PY4ZY8VT9UEDRSOYGK4A/image-asset.jpeg</image:loc>
      <image:title>XRC Community Gallery - XR Retreat 2022 - poster session</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1671044623487-PY4ZY8VT9UEDRSOYGK4A/image-asset.jpeg</image:loc>
      <image:title>XRC Community Gallery - XR Retreat 2022 - poster session</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1671044675012-HMT9C4XAN6ACWN770EMH/cv4arvr_2022_posters.jpg</image:loc>
      <image:title>XRC Community Gallery - CV4ARVR 2022 - poster session</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1671044706438-ZF3P9KPU5MN269VN9NMO/xrc_showcase_2019.JPG</image:loc>
      <image:title>XRC Community Gallery - XRC Showcase 2019</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2020/people</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-10-24</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2020 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2020 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2020 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2020 People</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2020/submission</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-06-12</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2020 Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2020 Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2020 Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2020 Submission</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2019/submission</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-01-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2019 Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2019 Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2019 Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2019 Submission</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2019/program</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-05-02</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1551214588637-F86SG154B086QJRSLRSI/paul_debevec.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550665571082-BTV7DWNN1RUIN2I95NJE/jean-yves-bouguet-63bb7.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550670431836-6007GR26VN1JJAXLFS8M/gordon_wetzstein_small.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1551215126778-VRBW8YYCIROMXB2NKO7F/RichardNewcombe2019.JPG</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550670098566-PBM968QB7OQZZQGDQU3N/Theobalt.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550592338515-DKPJBS4YNXS9ENAAJDU1/matthias_photo1.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1551186733919-N536WW35RP89RZ49441X/YaserPhoto.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1551448545227-1WHXQ6VYD3XMM4KC5PV2/sharam.jpeg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550587830648-I26HLT9ZK8WNCMRVN4OR/MattMiesnieks.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1547681472661-ZCLCFU0UAXS9G6LUVP1P/Marc-1-180x180.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program - Marc Pollefeys</image:title>
      <image:caption>Marc Pollefeys is Director of Science leading a team of scientist and engineers to develop advanced perception capabilities for HoloLens. He is also a Professor of Computer Science at ETH Zurich and was elected Fellow of the IEEE in 2012. He is best known for his work in 3D computer vision, having been the first to develop a software pipeline to automatically turn photographs into 3D models, but also works on robotics, graphics and machine learning problems. Other noteworthy projects he worked on with collaborators at UNC Chapel Hill and ETH Zurich are real-time 3D scanning with mobile devices, a real-time pipeline for 3D reconstruction of cities from vehicle mounted-cameras, camera-based self-driving cars and the first fully autonomous vision-based drone. Most recently his academic research has focused on combining 3D reconstruction with semantic scene understanding. He has published over 250 peer-reviewed publications and holds several patents. His lab at ETH Zurich also developed the PixHawk auto-pilot which can be found in over half a million drones and he has co-founded several computer vision start-ups.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1551210084626-GC8TKC6F1DOP3ILF2EPW/Torralbaheadshot.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1551119995866-8B4AM8MEMRG3624IMN0U/MSR_July_2018_124_N8A1033+%282%29.jpg</image:loc>
      <image:title>CV4ARVR 2019 Program</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2019/people</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-01-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2019 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2019 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2019 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2019 People</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2019</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-01-09</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>Overview</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>Overview</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>Overview</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>Overview</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2019/papers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-01-09</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2020</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-06-19</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2020</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2020</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2020</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2020</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-05-04</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2020/program</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2023-04-11</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2020 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704795-HSS92RTBW3XW4LZ2G7AD/_ml.jpg</image:loc>
      <image:title>CV4ARVR 2020 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704829-H6P6A3MZRNU0ZWV2WO3T/_g.jpg</image:loc>
      <image:title>CV4ARVR 2020 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550176276905-583SZJTMR2JGDXL0JRX0/_c.jpg</image:loc>
      <image:title>CV4ARVR 2020 Program</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2020/papers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2020-06-19</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1592606592206-LO88CKRXUV4J12YBC2CS/session.png</image:loc>
      <image:title>CV4ARVR 2020 Papers - Join the Discord Server</image:title>
      <image:caption>The authors of “Multi-user, Scalable 3D Object Detection in AR Cloud” present their work during the virtual poster session on June 19, 2020.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2021/program</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-12-05</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2021 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2021 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 Program</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2021/people</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-10-24</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2021 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2021 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 People</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2021</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-07-15</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2021/submission</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-08-24</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-02</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2022</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR 2022</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR 2022</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR 2022</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2021/papers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2021-10-23</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1550171704483-Y3IE3HCW4BJPH99OFJD7/_f.jpg</image:loc>
      <image:title>CV4ARVR 2021 Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2021 Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/image-asset.png</image:loc>
      <image:title>CV4ARVR 2021 Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1634908771292-7FHCE8DETYA8GIWPZQAS/fe.png</image:loc>
      <image:title>CV4ARVR 2021 Papers - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022/submission</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-02</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR 2022 - Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Submission</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR 2022 - Submission</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022/program</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-11-23</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR 2022 - Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Program</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR 2022 - Program</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022/people</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-10-24</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR 2022 - People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR 2022 - People</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR 2022 - People</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/paget3</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-05-26</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/contact-1</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-06</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/workshop/2022/papers</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-17</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1075d6b3-d5c9-4c7b-899a-fca22bfdc4fb/meta_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622661883058-YFJXP5EEB17LD7TVHLKQ/headroom_logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622664892300-OTL3L1102RSAYRAJMMS8/cmu-wordmark-stacked-k.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665706129-INN6TAZRHNXQW5INZN3J/university-of-copenhagen-logo.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers</image:title>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/1622665233780-RPKJ8DVJKW10PRBXSQ1O/cornell-tech-black.png</image:loc>
      <image:title>CV4ARVR 2022 - Papers</image:title>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/new-page-1</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-12-13</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/xr-at-cornell</loc>
    <changefreq>daily</changefreq>
    <priority>1.0</priority>
    <lastmod>2024-05-28</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/people-1</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-06</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/faq</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-06</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/news-1</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-06-06</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/paget2</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-05-26</lastmod>
  </url>
  <url>
    <loc>https://xr.cornell.edu/retreat</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2022-12-13</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/a95817f8-46d5-4496-bdbd-558eca190135/TECH_20190919_001.jpg</image:loc>
      <image:title>XR Retreat - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
  <url>
    <loc>https://xr.cornell.edu/collaboratory/prototyping-grant</loc>
    <changefreq>daily</changefreq>
    <priority>0.75</priority>
    <lastmod>2024-04-22</lastmod>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/54a2aead-2592-44ab-b556-cf031868cce7/xrc_1.jpg</image:loc>
      <image:title>XR Collaboratory Prototyping Grant - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
    <image:image>
      <image:loc>https://images.squarespace-cdn.com/content/v1/5c3f69e1cc8fedbc039ea739/5ed03641-01a2-4df3-8ee6-e22ab6483073/xrx_2.jpg</image:loc>
      <image:title>XR Collaboratory Prototyping Grant - Make it stand out</image:title>
      <image:caption>Whatever it is, the way you tell your story online can make all the difference.</image:caption>
    </image:image>
  </url>
</urlset>

