{"id":995568,"date":"2024-01-05T08:06:54","date_gmt":"2024-01-05T16:06:54","guid":{"rendered":"https:\/\/www.microsoft.com\/en-us\/research\/?post_type=msr-project&#038;p=995568"},"modified":"2025-03-31T11:56:28","modified_gmt":"2025-03-31T18:56:28","slug":"afmr-multicultural-analysis-and-empowerment","status":"publish","type":"msr-project","link":"https:\/\/www.microsoft.com\/en-us\/research\/project\/afmr-multicultural-analysis-and-empowerment\/","title":{"rendered":"AFMR: Multicultural Analysis and Empowerment"},"content":{"rendered":"<section class=\"mb-3 moray-highlight\">\n\t<div class=\"card-img-overlay mx-lg-0\">\n\t\t<div class=\"card-background  has-background- card-background--full-bleed\">\n\t\t\t<img loading=\"lazy\" decoding=\"async\" width=\"1920\" height=\"720\" src=\"https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720.png\" class=\"attachment-full size-full\" alt=\"white icon of an avatar inside of a magnifying glass surrounded by nodes on a green gradient background\" style=\"\" srcset=\"https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720.png 1920w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720-300x113.png 300w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720-1024x384.png 1024w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720-768x288.png 768w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720-1536x576.png 1536w, 
https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720-1600x600.png 1600w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Multicultural-Analysis-and-Empowerment-page-header_1920x720-240x90.png 240w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\" \/>\t\t<\/div>\n\t\t<!-- Foreground -->\n\t\t<div class=\"card-foreground d-flex mt-md-n5 my-lg-5 px-g px-lg-0\">\n\t\t\t<!-- Container -->\n\t\t\t<div class=\"container d-flex mt-md-n5 my-lg-5 \">\n\t\t\t\t<!-- Card wrapper -->\n\t\t\t\t<div class=\"w-100 w-lg-col-5\">\n\t\t\t\t\t<!-- Card -->\n\t\t\t\t\t<div class=\"card material-md-card py-5 px-md-5\">\n\t\t\t\t\t\t<div class=\"card-body \">\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<a href=\"https:\/\/www.microsoft.com\/en-us\/research\/collaboration\/accelerating-foundation-models-research\/\" class=\"icon-link icon-link--reverse mb-2\" data-bi-cN=\"Accelerating Foundation Models Research\">\n\t\t\t\t\t\t\t\t\t<span class=\"c-glyph glyph-chevron-left\" aria-hidden=\"true\"><\/span>\n\t\t\t\t\t\t\t\t\tAccelerating Foundation Models Research\t\t\t\t\t\t\t\t<\/a>\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\n<h1 class=\"wp-block-heading\" id=\"multicultural-analysis-and-empowerment\">Multicultural Analysis and Empowerment<\/h1>\n\n\n\n<p><\/p>\n\n\t\t\t\t\t\t<\/div>\n\t\t\t\t\t<\/div>\n\t\t\t\t<\/div>\n\t\t\t<\/div>\n\t\t<\/div>\n\t<\/div>\n<\/section>\n\n\n\n\n\n<blockquote class=\"wp-block-quote is-layout-flow wp-block-quote-is-layout-flow\">\n<p><strong><em>Academic research plays such an important role in advancing science, technology, culture, and society. 
This grant program helps ensure this community has access to the latest and leading AI models.<\/em><\/strong><\/p>\n<cite>Brad Smith, Vice Chair and President<\/cite><\/blockquote>\n\n\n\n<div class=\"wp-block-columns is-layout-flex wp-container-core-columns-is-layout-9d6595d7 wp-block-columns-is-layout-flex\">\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\" style=\"flex-basis:25%\"><\/div>\n\n\n\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\" style=\"flex-basis:50%\">\n<figure class=\"wp-block-image aligncenter size-full is-resized\"><img loading=\"lazy\" decoding=\"async\" width=\"400\" height=\"400\" src=\"https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Improve-Human-Interaction_1.3.png\" alt=\"medium green icon of three people standing under an archway with a checkmark\" class=\"wp-image-996363\" style=\"width:auto;height:150px\" srcset=\"https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Improve-Human-Interaction_1.3.png 400w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Improve-Human-Interaction_1.3-300x300.png 300w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Improve-Human-Interaction_1.3-150x150.png 150w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Improve-Human-Interaction_1.3-180x180.png 180w, https:\/\/www.microsoft.com\/en-us\/research\/wp-content\/uploads\/2024\/01\/Improve-Human-Interaction_1.3-360x360.png 360w\" sizes=\"auto, (max-width: 400px) 100vw, 400px\" \/><\/figure>\n\n\n\n<h2 class=\"wp-block-heading has-text-align-center h4\" id=\"afmr-goal-improve-human-interactions-via-sociotechnical-research\">AFMR Goal: Improve human interactions via sociotechnical research<\/h2>\n\n\n\n<p class=\"has-text-align-center\">which increases trust, human ingenuity, creativity, and productivity, and decreases the digital divide while reducing the 
risks of developing AI which does not benefit individuals and society<\/p>\n<\/div>\n\n\n\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\" style=\"flex-basis:25%\"><\/div>\n<\/div>\n\n\n\n<div style=\"padding-bottom:0; padding-top:0\" class=\"wp-block-msr-immersive-section alignfull row wp-block-msr-immersive-section\">\n\t\n\t<div class=\"container\">\n\t\t<div class=\"wp-block-msr-immersive-section__wrapper col-lg-11 col-xl-9 px-0 m-auto\">\n\t\t\t<div style=\"height:30px\" aria-hidden=\"true\" class=\"wp-block-spacer\"><\/div>\t\t<\/div>\n\t<\/div>\n\n\t<\/div>\n\n\n\n<p>The research projects primarily focus on enhancing language models, emphasizing underrepresented languages and cultures. Projects aim to improve the accuracy of health-related responses and fine-tune models for specific languages like Vietnamese and various Indian languages. Cultural intelligence is a key goal, promoting linguistic inclusivity and understanding model behavior with knowledge graph tools. Additional efforts involve developing model-editing techniques for interpretability and robustness, particularly in underrepresented languages. The overarching aim is to enhance language models for improved accuracy, adaptability, and inclusivity across diverse languages and cultures.<\/p>\n\n\n\n<div style=\"height:30px\" aria-hidden=\"true\" class=\"wp-block-spacer\"><\/div>\n\n\n\n\n\n<p><strong>University of Pretoria<\/strong>: Vukosi Marivate (PI)<\/p>\n\n\n\n<p>The goal of this study is to extend the capabilities of foundation models for ESL-speaking African communities by enhancing their ability to understand and generate content that accurately reflects the continent\u2019s socio-cultural specifics. This involves adapting models to users\u2019 specific needs, and linguistic styles; making the models more accessible and equitable for underrepresented groups. 
Through localized tuning and feedback, the project seeks to reduce performance gaps and tailor foundation models for diverse uses such as in legal, finance, and agriculture in Africa. The overarching goal is to provide users with the agency to customize foundation (language) models to their unique situations. <\/p>\n\n\n\n\n\n<p><strong>University of Waterloo<\/strong>: Jimmy Lin (PI)<\/p>\n\n\n\n<p>The proposal aims to build robust foundation models for African languages to bridge the technology gap affecting these communities. The project objectives include design and optimization of model architecture, multilingual transfer learning, evaluation, and making the resources openly available.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/dl.acm.org\/doi\/abs\/10.1145\/3626772.3657675\" target=\"_blank\" rel=\"noopener noreferrer\">Towards Robust QA Evaluation via Open LLMs<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/cs.uwaterloo.ca\/~jimmylin\/publications\/Adeyemi_etal_ACL2024.pdf\" target=\"_blank\" rel=\"noopener noreferrer\">Zero-Shot Cross-Lingual Reranking with Large Language Models for Low-Resource Languages<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>University of British Columbia<\/strong>: Vered Shwartz (PI)<\/p>\n\n\n\n<p>The proposal aims at addressing the cultural bias in Large Language Models (LLMs), which currently hold a heavy Western, North American, or even US-centric lens. 
By constructing a new dataset consisting of narratives that evoke social norms, the proposal aims to test the values of English LLMs as they reflect in real-world scenarios and better align the responses of LLMs with the values of diverse cultures.<\/p>\n\n\n\n<p><strong>Related paper:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/aclanthology.org\/2024.emnlp-main.385\/\" target=\"_blank\" rel=\"noopener noreferrer\">From Local Concepts to Universals: Evaluating the Multicultural Understanding of Vision-Language Models<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>Kennesaw State University<\/strong>: Dylan Goldblatt (PI)<\/p>\n\n\n\n<p>This project aims to explore applications of AI to provide personalized and culturally-responsive support for second language learners at KSU. The objectives are to establish whether an AI learning support approach improves performance and engagement in language courses; if the approach helps narrow the achievement and engagement gap for underprepared students; and whether the support approach is successful across various languages.<\/p>\n\n\n\n\n\n<p><strong>New York University<\/strong>: Duygu Ataman (PI)<\/p>\n\n\n\n<p>Recent advances have brought Large Language Models (LLMs) to an important stage that will play a significant role in shaping the next generation of applications in essential social domains, such as education and the media. Despite the continuous exploration of its remarkable capabilities, the performance of state-of-the-art models in most languages typically falls short of matching their counterparts in English.&nbsp;This project aims to bridge this gap by developing an adaptation methodology to improve LLM compatibility with under-resourced languages. 
The study uses Turkic languages as a case study, whose grammatical features present a challenging yet ideal setting for assessing NLP models.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2410.12656?\" target=\"_blank\" rel=\"noopener noreferrer\">Evaluating Morphological Compositional Generalization in Large Language Models<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2407.02337\" target=\"_blank\" rel=\"noopener noreferrer\">Open foundation models for Azerbaijani language<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>Georgia Institute of Technology<\/strong>: Srijan Kumar (PI)<\/p>\n\n\n\n<p>Investigate the capabilities of GPT-4 and its effectiveness in answering health-related queries in various languages. 
Our research will develop a comprehensive understanding of how broadly applicable the health-related reasoning abilities of foundational models are beyond the English language.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2310.13132\" target=\"_blank\" rel=\"noopener noreferrer\">Better to Ask in English: Cross-Lingual Evaluation of Large Language Models for Healthcare Queries<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2306.11065\" target=\"_blank\" rel=\"noopener noreferrer\">Cross-Modal Attribute Insertions for Assessing the Robustness of Vision-and-Language Learning<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>IIT Gandhinagar<\/strong>: Mayank Singh (PI)<\/p>\n\n\n\n<p>The proposal plans to develop model-editing techniques that can localize multilingual information and selectively update the parameters of Large Language Models (LLMs). The project involves experimenting with parameter-preserving and parameter-updating editing techniques. 
The goal of the project is to enhance LLMs in terms of interpretability, robustness, and factual accuracy for diverse communities.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/aclanthology.org\/2024.findings-eacl.140\/\" target=\"_blank\" rel=\"noopener noreferrer\">Cross-lingual Editing in Multilingual Language Models<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2402.11997\" target=\"_blank\" rel=\"noopener noreferrer\">Remember This Event That Year? Assessing Temporal Information and Reasoning in Large Language Models<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>IIT Bombay<\/strong>: Soumen Chakrabarti (PI)<\/p>\n\n\n\n<p>Our focus will be on the behavior of the latest generation of LLMs and their interaction with knowledge graph (KG) retrieval tools, in the context of Indian low-resource languages (LRLs), because it is easier to locate users of such languages locally, and code-switched texts.<\/p>\n\n\n\n\n\n<p><strong>University College London<\/strong>: Pontus Stenetorp (PI)<\/p>\n\n\n\n<p>The proposal aims to improve the performance of Large Language Models (LLMs) on African languages by augmenting low-resource African text data with synthetic data. 
It proposes a two-step method involving benchmarking tasks to understand the performance gap followed by generating training data for African languages.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2311.09828\" target=\"_blank\" rel=\"noopener noreferrer\">AfriMTE and AfriCOMET: Enhancing COMET to Embrace Under-resourced African Languages<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2311.09569\" target=\"_blank\" rel=\"noopener noreferrer\">Strings from the Library of Babel: Random Sampling as a Strong Baseline for Prompt Optimisation<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>IIT Kharagpur<\/strong>: Niloy Ganguly (PI)<\/p>\n\n\n\n<p>The proposal plans to analyze the performance of Large Language Models (LLMs) for Indian languages. Despite their proven utility, LLMs have not shown significant improvement for tasks in Indian languages compared to high-resource languages, possibly due to an underrepresented training corpus. The research aims to extensively benchmark LLMs&#8217; capabilities, strengths, and weaknesses for various tasks in Indian languages. 
The goal is to identify &#8216;good&#8217;, &#8216;bad&#8217;, and &#8216;ugly&#8217; performance cases and develop strategies for improvement, potentially addressing the underrepresentation of Indian languages in LLM performances.<\/p>\n\n\n\n<p><strong>Related paper:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/aclanthology.org\/2024.findings-emnlp.920\/\" target=\"_blank\" rel=\"noopener noreferrer\">Cost-Performance Optimization for Processing Low-Resource Language Tasks Using Commercial LLMs<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>KAIST<\/strong>: Alice Oh (PI)<\/p>\n\n\n\n<p>Develop a culturally-intelligent language model by creating a red-teaming dataset that evaluates actions in different cultures and testing the language model\u2019s responses in various language and cultural settings. The goal is to improve NLP models\u2019 awareness of cultural diversity and their ability to generate culturally intelligent responses.<\/p>\n\n\n\n\n\n<p><strong>Saarland University<\/strong>: Dietrich Klakow (PI)<\/p>\n\n\n\n<p>The project aims to advance research in multilingual foundation models, focusing on closing the gap in capabilities between English and non-English languages. 
The team plans to analyze the cross-lingual transfer abilities of foundation models, and seek to enhance these abilities with a focus on in-context learning.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2501.06374\" target=\"_blank\" rel=\"noopener noreferrer\">AFRIDOC-MT: Document-level MT Corpus for African Languages<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2402.12976\" target=\"_blank\" rel=\"noopener noreferrer\">The Impact of Demonstrations on Multilingual In-Context Learning: A Multidimensional Analysis<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>IIT Bombay<\/strong>: Soumen Chakrabarti (PI)<\/p>\n\n\n\n<p>We are probing LLMs to detect presence or absence of knowledge in multi-lingual knowledge graphs, with a focus on low-resource languages (LRLs). As we move from popular to even slightly obscure entities and relations, we are finding that the coverage and reliability of LLMs fall off perceptibly. Packaging a knowledge query in a prompt context, as well as fair evaluation of text output against structured gold knowledge, are proving challenging. We are also comparing LLMs against pure graph embedding techniques. 
We are finding that these two families of techniques make uncorrelated errors, suggesting a unified architecture leveraging the strengths of both.<\/p>\n\n\n\n<p><strong>Related papers:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/aclanthology.org\/2023.emnlp-main.868\/\" target=\"_blank\" rel=\"noopener noreferrer\">CRUSH4SQL: Collective Retrieval Using Schema Hallucination For Text2SQL<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2312.05571\" target=\"_blank\" rel=\"noopener noreferrer\">Frugal LMs Trained to Invoke Symbolic Solvers Achieve Parameter-Efficient Arithmetic Reasoning<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n\n\n\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2310.18338\" target=\"_blank\" rel=\"noopener noreferrer\">Small Language Models Fine-tuned to Coordinate Larger Language Models improve Complex Reasoning<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<p><strong>Ho Chi Minh City University of Technology<\/strong>: Duc Nguyen (PI)<\/p>\n\n\n\n<p>This proposal aims to create a finetuned large language model (LLaMa-2) specifically for Vietnamese using the QLoRa technique. The researchers seek to bring about proficiency in Vietnamese that rivals human-level communication while maintaining the vast knowledge base of the original model. 
An evaluation against other commercial models is also planned.<\/p>\n\n\n\n<p><strong>Related paper:<\/strong><\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><a class=\"msr-external-link glyph-append glyph-append-open-in-new-tab glyph-append-xsmall\" href=\"https:\/\/arxiv.org\/abs\/2403.02715\" target=\"_blank\" rel=\"noopener noreferrer\">Crossing Linguistic Horizons: Finetuning and Comprehensive Evaluation of Vietnamese Large Language Models<span class=\"sr-only\"> (opens in new tab)<\/span><\/a><\/li>\n<\/ul>\n\n\n\n\n\n<div style=\"height:25px\" aria-hidden=\"true\" class=\"wp-block-spacer\"><\/div>\n\n\n","protected":false},"excerpt":{"rendered":"<p>Academic research plays such an important role in advancing science, technology, culture, and society. This grant program helps ensure this community has access to the latest and leading AI models. which increases trust, human ingenuity, creativity, and productivity, and decreases the digital divide while reducing the risks of developing AI which does not benefit individuals 
[&hellip;]<\/p>\n","protected":false},"featured_media":995892,"template":"","meta":{"msr-url-field":"","msr-podcast-episode":"","msrModifiedDate":"","msrModifiedDateEnabled":false,"ep_exclude_from_search":false,"_classifai_error":"","footnotes":""},"research-area":[13556],"msr-locale":[268875],"msr-impact-theme":[],"msr-pillar":[],"class_list":["post-995568","msr-project","type-msr-project","status-publish","has-post-thumbnail","hentry","msr-research-area-artificial-intelligence","msr-locale-en_us","msr-archive-status-active"],"msr_project_start":"","related-publications":[],"related-downloads":[],"related-videos":[],"related-groups":[],"related-events":[],"related-opportunities":[],"related-posts":[],"related-articles":[],"tab-content":[],"slides":[],"related-researchers":[],"msr_research_lab":[],"msr_impact_theme":[],"_links":{"self":[{"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-project\/995568","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-project"}],"about":[{"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/types\/msr-project"}],"version-history":[{"count":24,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-project\/995568\/revisions"}],"predecessor-version":[{"id":1135564,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-project\/995568\/revisions\/1135564"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/media\/995892"}],"wp:attachment":[{"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/media?parent=995568"}],"wp:term":[{"taxonomy":"msr-research-area","embeddable":true,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/research-area?post=995568"},{"taxonomy":"msr-locale","embeddable":true,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-locale?post=9955
68"},{"taxonomy":"msr-impact-theme","embeddable":true,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-impact-theme?post=995568"},{"taxonomy":"msr-pillar","embeddable":true,"href":"https:\/\/www.microsoft.com\/en-us\/research\/wp-json\/wp\/v2\/msr-pillar?post=995568"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}