Publications
2024
-
Preprint. Glider: Global and Local Instruction-Driven Expert Router. Yadav, Prateek*, Li, Pingzhi*, Yoon, Jaehong, Peng, Jie, Sung, Yi-Lin, Bansal, Mohit, and Chen, Tianlong. 2024.
@misc{yadav2024glider, abbr = {Preprint}, bibtex_show = {true}, selected = {true}, title = {Glider: Global and Local Instruction-Driven Expert Router}, author = {Yadav, Prateek* and Li, Pingzhi* and Yoon, Jaehong and Peng, Jie and Sung, Yi-Lin and Bansal, Mohit and Chen, Tianlong}, year = {2024}, arxiv = {2410.07172}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
-
Preprint. What Matters for Model Merging at Scale? Yadav, Prateek, Vu, Tu, Lai, Jonathan, Chronopoulou, Alexandra, Faruqui, Manaal, Bansal, Mohit, and Munkhdalai, Tsendsuren. 2024.
@misc{yadav2024mattersmodelmergingscale, abbr = {Preprint}, bibtex_show = {true}, selected = {true}, title = {What Matters for Model Merging at Scale?}, author = {Yadav, Prateek and Vu, Tu and Lai, Jonathan and Chronopoulou, Alexandra and Faruqui, Manaal and Bansal, Mohit and Munkhdalai, Tsendsuren}, year = {2024}, arxiv = {2410.03617}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
-
Preprint. A Survey on Model MoErging: Recycling and Routing Among Specialized Experts for Collaborative Learning. Yadav, Prateek, Raffel, Colin, Muqeeth, Mohammed, Caccia, Lucas, Liu, Haokun, Chen, Tianlong, Bansal, Mohit, Choshen, Leshem, and Sordoni, Alessandro. 2024.
@misc{yadav2024moerging, abbr = {Preprint}, bibtex_show = {true}, selected = {true}, title = {A Survey on Model MoErging: Recycling and Routing Among Specialized Experts for Collaborative Learning}, author = {Yadav, Prateek and Raffel, Colin and Muqeeth, Mohammed and Caccia, Lucas and Liu, Haokun and Chen, Tianlong and Bansal, Mohit and Choshen, Leshem and Sordoni, Alessandro}, year = {2024}, arxiv = {2408.07057}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
-
Preprint. BigCodeBench: Benchmarking Code Generation with Diverse Function Calls and Complex Instructions. Zhuo, Terry Yue, Vu, Minh Chien, Chim, Jenny, Hu, Han, Yu, Wenhao, Widyasari, Ratnadira, Yusuf, Imam Nur Bani, Zhan, Haolan, He, Junda, Paul, Indraneil, Brunner, Simon, Gong, Chen, Hoang, Thong, Zebaze, Armel Randy, Hong, Xiaoheng, Li, Wen-Ding, Kaddour, Jean, Xu, Ming, Zhang, Zhihan, Yadav, Prateek, Jain, Naman, Gu, Alex, Cheng, Zhoujun, Liu, Jiawei, Liu, Qian, Wang, Zijian, Lo, David, Hui, Binyuan, Muennighoff, Niklas, Fried, Daniel, Du, Xiaoning, de Vries, Harm, and von Werra, Leandro. 2024.
@misc{zhuo2024bigcodebenchbenchmarkingcodegeneration, abbr = {Preprint}, bibtex_show = {true}, selected = {false}, title = {BigCodeBench: Benchmarking Code Generation with Diverse Function Calls and Complex Instructions}, author = {Zhuo, Terry Yue and Vu, Minh Chien and Chim, Jenny and Hu, Han and Yu, Wenhao and Widyasari, Ratnadira and Yusuf, Imam Nur Bani and Zhan, Haolan and He, Junda and Paul, Indraneil and Brunner, Simon and Gong, Chen and Hoang, Thong and Zebaze, Armel Randy and Hong, Xiaoheng and Li, Wen-Ding and Kaddour, Jean and Xu, Ming and Zhang, Zhihan and Yadav, Prateek and Jain, Naman and Gu, Alex and Cheng, Zhoujun and Liu, Jiawei and Liu, Qian and Wang, Zijian and Lo, David and Hui, Binyuan and Muennighoff, Niklas and Fried, Daniel and Du, Xiaoning and de Vries, Harm and Werra, Leandro Von}, year = {2024}, arxiv = {2406.15877}, archiveprefix = {arXiv}, primaryclass = {cs.SE} }
-
Preprint. Aurora-M: The First Open Source Multilingual Language Model Red-teamed according to the U.S. Executive Order. Nakamura, Taishi, Mishra, Mayank, Tedeschi, Simone, Chai, Yekun, Stillerman, Jason T, Friedrich, Felix, Yadav, Prateek, Laud, Tanmay, Chien, Vu Minh, Zhuo, Terry Yue, Misra, Diganta, Bogin, Ben, Vu, Xuan-Son, Karpinska, Marzena, Dantuluri, Arnav Varma, Kusa, Wojciech, Tommaso, and others. 2024.
@misc{nakamura2024auroramopensourcemultilingual, abbr = {Preprint}, bibtex_show = {true}, selected = {false}, title = {Aurora-M: The First Open Source Multilingual Language Model Red-teamed according to the U.S. Executive Order}, author = {Nakamura, Taishi and Mishra, Mayank and Tedeschi, Simone and Chai, Yekun and Stillerman, Jason T and Friedrich, Felix and Yadav, Prateek and Laud, Tanmay and Chien, Vu Minh and Zhuo, Terry Yue and Misra, Diganta and Bogin, Ben and Vu, Xuan-Son and Karpinska, Marzena and Dantuluri, Arnav Varma and Kusa, Wojciech and Tommaso}, year = {2024}, arxiv = {2404.00399}, archiveprefix = {arXiv}, primaryclass = {cs.CL} }
-
Preprint. ComPEFT: Compression for Communicating Parameter Efficient Updates via Sparsification and Quantization. Yadav, Prateek, Choshen, Leshem, Raffel, Colin, and Bansal, Mohit. 2024.
@misc{yadav2023compeft, abbr = {Preprint}, bibtex_show = {true}, selected = {true}, code = {https://github.com/prateeky2806/compeft}, title = {ComPEFT: Compression for Communicating Parameter Efficient Updates via Sparsification and Quantization}, author = {Yadav, Prateek and Choshen, Leshem and Raffel, Colin and Bansal, Mohit}, year = {2024}, arxiv = {2311.13171}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
-
ICLR [Spotlight]. Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy. Li, Pingzhi, Zhang, Zhenyu, Yadav, Prateek, Sung, Yi-Lin, Cheng, Yu, Bansal, Mohit, and Chen, Tianlong. In International Conference on Learning Representations, 2024.
@inproceedings{li2023merge, abbr = {ICLR [Spotlight]}, bibtex_show = {true}, selected = {true}, code = {https://github.com/UNITES-Lab/MC-SMoE}, title = {Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy}, author = {Li, Pingzhi and Zhang, Zhenyu and Yadav, Prateek and Sung, Yi-Lin and Cheng, Yu and Bansal, Mohit and Chen, Tianlong}, booktitle = {International Conference on Learning Representations}, year = {2024}, arxiv = {2310.01334}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
-
ICLR. D2 Pruning: Message Passing for Balancing Diversity & Difficulty in Data Pruning. Maharana, Adyasha, Yadav, Prateek, and Bansal, Mohit. In International Conference on Learning Representations, 2024.
@inproceedings{maharana2023d2pruning, abbr = {ICLR}, bibtex_show = {true}, selected = {true}, code = {https://github.com/adymaharana/d2pruning}, title = {D2 Pruning: Message Passing for Balancing Diversity & Difficulty in Data Pruning}, author = {Maharana, Adyasha and Yadav, Prateek and Bansal, Mohit}, booktitle = {International Conference on Learning Representations}, year = {2024}, archiveprefix = {arXiv}, primaryclass = {cs.LG}, arxiv = {2310.07931} }
2023
-
NeurIPS’23. Resolving Interference When Merging Models. Yadav, Prateek, Tam, Derek, Choshen, Leshem, Raffel, Colin, and Bansal, Mohit. In Neural Information Processing Systems, 2023.
Transfer learning – i.e., further fine-tuning a pre-trained model on a downstream task – can confer significant advantages, including improved downstream performance, faster convergence, and better sample efficiency. These advantages have led to a proliferation of task-specific fine-tuned models, which typically can only perform a single task and do not benefit from one another. Recently, model merging techniques have emerged as a solution to combine multiple task-specific models into a single multitask model without performing additional training. However, existing merging methods often ignore the interference between parameters of different models, resulting in large performance drops when merging multiple models. In this paper, we demonstrate that prior merging techniques inadvertently lose valuable information due to two major sources of interference: (a) interference due to redundant parameter values and (b) disagreement on the sign of a given parameter’s values across models. To address this, we propose our method, Trim, Elect Sign & Merge (TIES-Merging), which introduces three novel steps when merging models: (1) resetting parameters that only changed a small amount during fine-tuning, (2) resolving sign conflicts, and (3) merging only the parameters that are in alignment with the final agreed-upon sign. We find that TIES-Merging outperforms several existing methods in diverse settings covering a range of modalities, domains, number of tasks, model sizes, architectures, and fine-tuning settings. We further analyze the impact of different types of interference on model parameters, and highlight the importance of resolving sign interference.
@inproceedings{yadav2023ties-merging, abbr = {NeurIPS'23}, bibtex_show = {true}, selected = {true}, code = {https://github.com/prateeky2806/ties-merging}, title = {Resolving Interference When Merging Models}, author = {Yadav, Prateek and Tam, Derek and Choshen, Leshem and Raffel, Colin and Bansal, Mohit}, booktitle = {Neural Information Processing Systems}, year = {2023}, arxiv = {2306.01708}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
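The three TIES-Merging steps described above map directly to a few array operations. Below is a minimal numpy sketch of trim, sign election, and disjoint merging over flat task vectors (the difference between fine-tuned and pretrained weights); the function name, the density value, and the scaling factor lam are illustrative assumptions, and the official implementation is the repository linked in the entry.

```python
import numpy as np

def ties_merge(task_vectors, density=0.2, lam=1.0):
    """Toy TIES-Merging over flat task vectors (fine-tuned minus pretrained weights).

    1. Trim: keep only the top-`density` fraction of each vector by magnitude.
    2. Elect sign: per parameter, pick the sign with the larger total magnitude.
    3. Disjoint merge: average only the trimmed values that agree with the elected sign.
    """
    tvs = np.stack(task_vectors)                      # (num_tasks, num_params)

    # 1. Trim: zero out all but the largest-magnitude entries per task vector.
    k = int(density * tvs.shape[1])
    trimmed = np.zeros_like(tvs)
    for i, tv in enumerate(tvs):
        keep = np.argsort(np.abs(tv))[-k:]
        trimmed[i, keep] = tv[keep]

    # 2. Elect sign: sign of the summed signed magnitudes per parameter.
    elected_sign = np.sign(trimmed.sum(axis=0))

    # 3. Disjoint merge: mean over entries whose sign matches the elected sign.
    agree = (np.sign(trimmed) == elected_sign) & (trimmed != 0)
    counts = np.maximum(agree.sum(axis=0), 1)
    merged = (trimmed * agree).sum(axis=0) / counts

    return lam * merged                               # added back to the pretrained weights

# Example: merge three toy "task vectors".
rng = np.random.default_rng(0)
deltas = [rng.normal(size=1000) for _ in range(3)]
merged_delta = ties_merge(deltas, density=0.2)
print(merged_delta.shape)
```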
-
NeurIPS’23. Self-Chained Image-Language Model for Video Localization and Question Answering. Yu, Shoubin, Cho, Jaemin, Yadav, Prateek, and Bansal, Mohit. In Neural Information Processing Systems, 2023.
Recent studies have shown promising results on utilizing pre-trained image-language models for video question answering. While these image-language models can efficiently bootstrap the representation learning of video-language models, they typically concatenate uniformly sampled video frames as visual inputs without explicit language-aware, temporal modeling. When only a portion of a video input is relevant to the language query, such uniform frame sampling can often lead to missing important visual cues. Although humans often find a video moment to focus on and rewind the moment to answer questions, training a query-aware video moment localizer often requires expensive annotations and high computational costs. To address this issue, we propose Self-Chained Video Localization-Answering (SeViLA), a novel framework that leverages a single image-language model (BLIP-2) to tackle both temporal keyframe localization and QA on videos. SeViLA framework consists of two modules: Localizer and Answerer, where both are parameter-efficiently fine-tuned from BLIP-2. We chain these modules for cascaded inference and self-refinement. First, in the forward chain, the Localizer finds multiple language-aware keyframes in a video, which the Answerer uses to predict the answer. Second, in the reverse chain, the Answerer generates keyframe pseudo-labels to refine the Localizer, alleviating the need for expensive video moment localization annotations. SeViLA outperforms several strong baselines/previous works on five video QA and event prediction tasks, and achieves the state-of-the-art in both fine-tuning (NExT-QA, STAR) and zero-shot (NExT-QA, STAR, How2QA, VLEP) settings. We show a comprehensive analysis, e.g., the impact of Localizer, comparisons of Localizer with other temporal localization models, pre-training/self-refinement of Localizer, and varying the number of keyframes.
@inproceedings{yu2023self, abbr = {NeurIPS'23}, bibtex_show = {true}, selected = {true}, code = {https://github.com/Yui010206/SeViLA}, title = {Self-Chained Image-Language Model for Video Localization and Question Answering}, author = {Yu, Shoubin and Cho, Jaemin and Yadav, Prateek and Bansal, Mohit}, booktitle = {Neural Information Processing Systems}, year = {2023}, arxiv = {2305.06988}, archiveprefix = {arXiv}, primaryclass = {cs.CV} }
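The forward and reverse chains described above reduce to a simple control flow once the two BLIP-2-based modules are abstracted behind function handles. The sketch below is a toy rendering of that chaining; localizer_score and answerer are hypothetical stand-ins, not the actual SeViLA interfaces (see the linked repository for the real implementation).

```python
def sevila_forward_chain(frames, question, options, localizer_score, answerer, k=4):
    """Forward chain: the Localizer scores frames for relevance to the question,
    and the Answerer predicts from the top-k (language-aware) keyframes."""
    ranked = sorted(range(len(frames)), key=lambda i: -localizer_score(frames[i], question))
    keyframes = [frames[i] for i in sorted(ranked[:k])]   # keep keyframes in temporal order
    return answerer(keyframes, question, options)

def sevila_reverse_chain(frames, question, options, gold_answer, answerer):
    """Reverse chain: frames from which the Answerer alone recovers the gold answer
    become pseudo keyframe labels used to refine the Localizer."""
    return [int(answerer([f], question, options) == gold_answer) for f in frames]

# Toy usage with dummy stand-ins for the two BLIP-2-based modules.
frames = [f"frame_{i}" for i in range(8)]
dummy_localizer = lambda frame, q: float(frame.endswith(("3", "5")))
dummy_answerer = lambda kfs, q, opts: opts[0] if "frame_3" in kfs else opts[1]
print(sevila_forward_chain(frames, "what happens?", ["A", "B"], dummy_localizer, dummy_answerer))
print(sevila_reverse_chain(frames, "what happens?", ["A", "B"], "A", dummy_answerer))
```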
-
ACL’23. Exploring Continual Learning for Code Generation Models. Yadav, Prateek, Sun, Qing, Ding, Hantian, Li, Xiaopeng, Zhang, Dejiao, Tan, Ming, Ma, Xiaofei, Bhatia, Parminder, Nallapati, Ramesh, Ramanathan, Murali Krishna, Bansal, Mohit, and Xiang, Bing. In Association for Computational Linguistics, 2023.
Large-scale code generation models such as Codex and CodeT5 have achieved impressive performance. However, libraries are upgraded or deprecated very frequently and re-training large-scale language models is computationally expensive. Therefore, Continual Learning (CL) is an important aspect that remains underexplored in the code domain. In this paper, we introduce a benchmark called CodeTask-CL that covers a wide range of tasks, including code generation, translation, summarization, and refinement, with different input and output programming languages. Next, on our CodeTask-CL benchmark, we compare popular CL techniques from NLP and Vision domains. We find that effective methods like Prompt Pooling (PP) suffer from catastrophic forgetting due to the unstable training of the prompt selection mechanism caused by stark distribution shifts in coding tasks. We address this issue with our proposed method, Prompt Pooling with Teacher Forcing (PP-TF), that stabilizes training by enforcing constraints on the prompt selection mechanism and leads to a 21.54% improvement over Prompt Pooling. Along with the benchmark, we establish a training pipeline that can be used for CL on code models, which we believe can motivate further development of CL methods for code models.
@inproceedings{yadav2023code-cl, abbr = {ACL'23}, bibtex_show = {true}, selected = {true}, author = {Yadav, Prateek and Sun, Qing and Ding, Hantian and Li, Xiaopeng and Zhang, Dejiao and Tan, Ming and Ma, Xiaofei and Bhatia, Parminder and Nallapati, Ramesh and Ramanathan, Murali Krishna and Bansal, Mohit and Xiang, Bing}, title = {Exploring Continual Learning for Code Generation Models}, booktitle = {Association for Computational Linguistics}, year = {2023}, pdf = {codecl.pdf} }
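One way to picture the teacher-forcing constraint described above is a prompt pool whose top-k selection is overridden during training by a fixed task-to-prompt assignment, falling back to query-key matching at inference. The sketch below is an assumed, simplified reading of Prompt Pooling with Teacher Forcing; the pool sizes, the even partitioning of prompts across tasks, and the class interface are illustrative choices rather than the paper's exact recipe.

```python
import numpy as np

class PromptPoolTF:
    """Toy prompt pool with teacher-forced selection during training."""

    def __init__(self, pool_size=20, prompt_len=8, dim=32, top_k=4, num_tasks=5, seed=0):
        rng = np.random.default_rng(seed)
        self.keys = rng.normal(size=(pool_size, dim))              # learnable prompt keys
        self.prompts = rng.normal(size=(pool_size, prompt_len, dim))
        self.top_k = top_k
        # Assumption: evenly partition the pool so each task owns a fixed subset.
        self.task_to_prompts = np.array_split(np.arange(pool_size), num_tasks)

    def select(self, query, task_id=None, training=False):
        if training and task_id is not None:
            # Teacher forcing: ignore query-key similarity, use the task's assigned prompts.
            idx = self.task_to_prompts[task_id][: self.top_k]
        else:
            # Inference: pick the top-k prompts by cosine similarity to the query.
            sims = self.keys @ query / (np.linalg.norm(self.keys, axis=1) * np.linalg.norm(query) + 1e-8)
            idx = np.argsort(sims)[-self.top_k:]
        return self.prompts[idx]                                   # prepended to the input embeddings

pool = PromptPoolTF()
q = np.random.default_rng(1).normal(size=32)
print(pool.select(q, task_id=2, training=True).shape)   # (4, 8, 32)
print(pool.select(q, training=False).shape)             # (4, 8, 32)
```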
-
ACL’23. Exclusive Supermask Subnetwork Training for Continual Learning. Yadav, Prateek, and Bansal, Mohit. In Findings of the Association for Computational Linguistics, 2023.
Continual Learning (CL) methods focus on accumulating knowledge over time while avoiding catastrophic forgetting. Recently, Wortsman et al. (2020) proposed a CL method, SupSup, which uses a randomly initialized, fixed base network (model) and finds a supermask for each new task that selectively keeps or removes each weight to produce a subnetwork. They prevent forgetting as the network weights are not being updated. Although there is no forgetting, the performance of SupSup is sub-optimal because fixed weights restrict its representational power. Furthermore, there is no accumulation or transfer of knowledge inside the model when new tasks are learned. Hence, we propose EXSSNET (Exclusive Supermask SubNEtwork Training), that performs exclusive and non-overlapping subnetwork weight training. This avoids conflicting updates to the shared weights by subsequent tasks to improve performance while still preventing forgetting. Furthermore, we propose a novel KNN-based Knowledge Transfer (KKT) module that utilizes previously acquired knowledge to learn new tasks better and faster. We demonstrate that EXSSNET outperforms strong previous methods on both NLP and Vision domains while preventing forgetting. Moreover, EXSSNET is particularly advantageous for sparse masks that activate 2-10% of the model parameters, resulting in an average improvement of 8.3% over SupSup. Furthermore, EXSSNET scales to a large number of tasks (100).
@inproceedings{yadav2023exssnet, abbr = {ACL'23}, bibtex_show = {true}, selected = {true}, code = {https://github.com/prateeky2806/exessnet}, author = {Yadav, Prateek and Bansal, Mohit}, title = {Exclusive Supermask Subnetwork Training for Continual Learning}, booktitle = {Findings of the Association for Computational Linguistics}, year = {2023}, pdf = {exssnet.pdf} }
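The exclusive-training idea described above can be captured with simple mask bookkeeping over a shared weight vector: each task's supermask is recorded, but only the masked weights that no earlier task has claimed become trainable. The sketch below is a toy illustration under that reading; the top-k score heuristic stands in for the learned supermask, and the KKT transfer module is not modeled.

```python
import numpy as np

class ExclusiveSubnetworks:
    """Toy EXSSNET-style bookkeeping over a flat weight vector."""

    def __init__(self, num_params, sparsity=0.05, seed=0):
        rng = np.random.default_rng(seed)
        self.weights = rng.normal(size=num_params)      # shared, randomly initialized backbone
        self.used = np.zeros(num_params, dtype=bool)    # weights claimed (trained) by earlier tasks
        self.task_masks = []                            # one supermask per task
        self.k = int(sparsity * num_params)

    def add_task(self, scores):
        """`scores` stands in for the learned per-weight supermask scores of the new task."""
        mask = np.zeros_like(self.used)
        mask[np.argsort(scores)[-self.k:]] = True       # supermask: top-k scoring weights
        self.task_masks.append(mask)

        exclusive = mask & ~self.used                   # weights no previous task has trained
        self.used |= exclusive                          # claim them for this task
        return exclusive                                # only these receive gradient updates

    def forward(self, x, task_id):
        # Inference for a task uses its full supermask over the shared weights.
        return x * (self.weights * self.task_masks[task_id])

net = ExclusiveSubnetworks(num_params=1000)
rng = np.random.default_rng(1)
for t in range(3):
    trainable = net.add_task(scores=rng.random(1000))
    print(f"task {t}: {trainable.sum()} exclusively trainable weights")
```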
2022
-
ACL’22. Explanation Graph Generation via Pre-trained Language Models: An Empirical Study with Contrastive Learning. Saha, Swarnadeep, Yadav, Prateek, and Bansal, Mohit. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2022.
Pre-trained sequence-to-sequence language models have led to widespread success in many natural language generation tasks. However, there has been relatively less work on analyzing their ability to generate structured outputs such as graphs. Unlike natural language, graphs have distinct structural and semantic properties in the context of a downstream NLP task, e.g., generating a graph that is connected and acyclic can be attributed to its structural constraints, while the semantics of a graph can refer to how meaningfully an edge represents the relation between two node concepts. In this work, we study pre-trained language models that generate explanation graphs in an end-to-end manner and analyze their ability to learn the structural constraints and semantics of such graphs. We first show that with limited supervision, pre-trained language models often generate graphs that either violate these constraints or are semantically incoherent. Since curating large amounts of human-annotated graphs is expensive and tedious, we propose simple yet effective ways of graph perturbations via node and edge edit operations that lead to structurally and semantically positive and negative graphs. Next, we leverage these graphs in different contrastive learning models with Max-Margin and InfoNCE losses. Our methods lead to significant improvements in both structural and semantic accuracy of explanation graphs and also generalize to other similar graph generation tasks. Lastly, we show that human errors are the best negatives for contrastive learning and also that automatically generating more such human-like negative graphs can lead to further improvements.
@inproceedings{saha2022explanation, abbr = {ACL'22}, bibtex_show = {true}, selected = {false}, code = {https://github.com/swarnaHub/ExplagraphGen}, poster = {https://drive.google.com/file/d/13U1XX99efxzkQzmBGQv_P3nN79DnKukc/view?usp=sharing}, title = {Explanation Graph Generation via Pre-trained Language Models: An Empirical Study with Contrastive Learning}, author = {Saha, Swarnadeep and Yadav, Prateek and Bansal, Mohit}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = may, year = {2022}, publisher = {Association for Computational Linguistics} }
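The contrastive objective mentioned above can be illustrated with a generic InfoNCE loss over graph embeddings, where the positive is a semantically consistent graph and the negatives are structurally or semantically perturbed ones. The snippet below is a minimal sketch with random vectors standing in for graph-encoder outputs; it shows the loss form only, not the paper's graph perturbation or encoding pipeline.

```python
import numpy as np

def info_nce(anchor, positive, negatives, temperature=0.1):
    """InfoNCE over one anchor graph embedding, one positive, and a set of negatives."""
    def cos(a, b):
        return a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)

    logits = np.array([cos(anchor, positive)] + [cos(anchor, n) for n in negatives]) / temperature
    m = logits.max()
    log_probs = logits - np.log(np.exp(logits - m).sum()) - m   # numerically stable log-softmax
    return -log_probs[0]   # minimize: pull the positive graph close, push perturbed graphs away

rng = np.random.default_rng(0)
anchor = rng.normal(size=64)                          # embedding of a gold explanation graph
positive = anchor + 0.05 * rng.normal(size=64)        # e.g., a semantically equivalent perturbation
negatives = [rng.normal(size=64) for _ in range(8)]   # e.g., edge-edited / incoherent graphs
print(float(info_nce(anchor, positive, negatives)))
```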
-
Preprint. Low-Cost Algorithmic Recourse for Users With Uncertain Cost Functions. Yadav, Prateek, Hase, Peter, and Bansal, Mohit. 2022.
@misc{yadav2021lowcost, abbr = {Preprint}, bibtex_show = {false}, selected = {true}, code = {https://github.com/prateeky2806/EMC-COLS-recourse}, title = {Low-Cost Algorithmic Recourse for Users With Uncertain Cost Functions}, author = {Yadav, Prateek and Hase, Peter and Bansal, Mohit}, year = {2022}, arxiv = {2111.01235}, archiveprefix = {arXiv}, primaryclass = {cs.LG} }
2021
-
EMNLP’21 [ORAL]. ExplaGraphs: An Explanation Graph Generation Task for Structured Commonsense Reasoning. Saha, Swarnadeep, Yadav, Prateek, Bauer, Lisa, and Bansal, Mohit. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2021.
ExplaGraphs is a generative and structured commonsense reasoning task (with an accompanying dataset) of explanation graph generation for stance prediction: given a belief and an argument, a model must predict whether the argument supports or counters the belief and also generate a commonsense explanation graph that justifies the predicted stance. The work introduces a graph collection framework for gathering high-quality human-annotated explanation graphs, along with a multi-level evaluation framework that checks the structural correctness and semantic plausibility of generated graphs. Experiments show that state-of-the-art text generation models produce explanation graphs that fall well short of human quality, leaving substantial room for improvement on this task.
@inproceedings{saha2021explagraphs, abbr = {EMNLP'21 [ORAL]}, bibtex_show = {true}, selected = {true}, code = {https://github.com/swarnaHub/ExplaGraphs}, poster = {https://drive.google.com/file/d/1EzhGbP3rJlH6X49xAYsApiM_jYfc1V8k/view?usp=sharing}, arxiv = {2104.07644}, title = {ExplaGraphs: An Explanation Graph Generation Task for Structured Commonsense Reasoning}, author = {Saha, Swarnadeep and Yadav, Prateek and Bauer, Lisa and Bansal, Mohit}, booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP)}, month = nov, year = {2021}, publisher = {Association for Computational Linguistics} }
-
NAACL’21 [ORAL]. multiPRover: Generating Multiple Proofs for Improved Interpretability in Rule Reasoning. Saha, Swarnadeep, Yadav, Prateek, and Bansal, Mohit. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2021.
We focus on a type of linguistic formal reasoning where the goal is to reason over explicit knowledge in the form of natural language facts and rules (Clark et al., 2020). A recent work, named PRover (Saha et al., 2020), performs such reasoning by answering a question and also generating a proof graph that explains the answer. However, compositional reasoning is not always unique and there may be multiple ways of reaching the correct answer. Thus, in our work, we address a new and challenging problem of generating multiple proof graphs for reasoning over natural language rule-bases. Each proof provides a different rationale for the answer, thereby improving the interpretability of such reasoning systems. In order to jointly learn from all proof graphs and exploit the correlations between multiple proofs for a question, we pose this task as a set generation problem over structured output spaces where each proof is represented as a directed graph. We propose two variants of a proof-set generation model, multiPRover. Our first model, Multilabel-multiPRover, generates a set of proofs via multi-label classification and implicit conditioning between the proofs; while the second model, Iterative-multiPRover, generates proofs iteratively by explicitly conditioning on the previously generated proofs. Experiments on multiple synthetic, zero-shot, and human-paraphrased datasets reveal that both multiPRover models significantly outperform PRover on datasets containing multiple gold proofs. Iterative-multiPRover obtains state-of-the-art proof F1 in zero-shot scenarios where all examples have single correct proofs. It also generalizes better to questions requiring higher depths of reasoning where multiple proofs are more frequent.
@inproceedings{saha2021multiprover, abbr = {NAACL'21 [ORAL]}, bibtex_show = {true}, selected = {true}, code = {https://github.com/swarnaHub/multiPRover}, poster = {https://drive.google.com/file/d/1_TUoWE8xd2w6T4zvAsdHatJW764Z-61w/view?usp=sharing}, arxiv = {2106.01354}, title = {multi{PR}over: Generating Multiple Proofs for Improved Interpretability in Rule Reasoning}, author = {Saha, Swarnadeep and Yadav, Prateek and Bansal, Mohit}, booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies}, month = jun, year = {2021}, address = {Online}, publisher = {Association for Computational Linguistics}, url = {https://aclanthology.org/2021.naacl-main.287}, doi = {10.18653/v1/2021.naacl-main.287}, pages = {3662--3677} }
-
Preprint. Discrete Time Latent Hawkes Processes for Modeling Multidimensional Temporal Event Streams. Yadav, Prateek, Sankaran, Raman, Dutta, Partha, and Bhatt, Rushi. Preprint, 2021.
Multidimensional event streams are common in many applications such as content on social platforms. Existing models using Hawkes processes and its variants often ignore important information about the causal parents of the events, which typically is readily available in social media applications. These models also ignore the disproportionate response created by some of the rare events, such as the impact of a “like” on a content by an influencer. Addressing these limitations, this paper proposes a novel Bayesian dIscRete Time Hawkes (BIRTH) model, a Bayesian generative model for multidimensional event streams data with causal information. Through its latent variables, BIRTH is flexible enough to capture contrasting responses invoked by multiple events of the same type. Moreover, being a discrete-time model, the latent parameters scale as O(#Time bins) in BIRTH as compared to O(#events) for continuous-time processes, thus scaling better in the settings when the number of events is huge. For inference, we propose a Gibbs sampling based inference procedure for BIRTH, which is suitable when the whole data can be processed together. While a full variational inference procedure is difficult to arrive at due to non-conjugate factors in the posterior, we propose a Stochastic hybrid Gibbs-Variational Inference (SVI) algorithm, which is beneficial in the settings where Gibbs might be expensive in terms of memory requirements. SVI has per-iteration memory complexity proportional to the chosen minibatch size, and also extends easily for online streaming settings of the data. We thoroughly evaluate BIRTH’s abilities over both synthetic and real-world social network event streams. Specifically, on synthetic datasets we demonstrate model fitting, recovery of planted structure and identification of the rare events. For a social network dataset we show significantly higher likelihoods along with rare event identification.
@article{virality, abbr = {Preprint}, bibtex_show = {true}, selected = {false}, title = {Discrete Time Latent Hawkes Processes for Modeling Multidimensional Temporal Event Streams}, author = {{Yadav}, Prateek and {Sankaran}, Raman and {Dutta}, Partha and {Bhatt}, Rushi}, journal = {Preprint}, year = {2021}, pdf = {birth.pdf} }
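For reference, the discrete-time multivariate Hawkes intensity that BIRTH builds on has a simple closed form: the rate of dimension d in bin t is a base rate plus kernel-weighted contributions from event counts in earlier bins. The sketch below computes that intensity with an assumed exponential kernel; the latent variables, causal-parent information, and the Gibbs/SVI inference procedures from the paper are not modeled.

```python
import numpy as np

def discrete_hawkes_intensity(counts, mu, alpha, decay=0.5):
    """Intensity per dimension and time bin for a discrete-time multivariate Hawkes process.

    counts : (T, D) event counts per time bin and dimension
    mu     : (D,)   base rates
    alpha  : (D, D) excitation matrix, alpha[i, j] = influence of dimension j on dimension i
    """
    T, D = counts.shape
    lam = np.zeros((T, D))
    for t in range(T):
        lam[t] = mu
        for s in range(t):                      # contributions from all earlier bins
            kernel = np.exp(-decay * (t - s))   # exponential decay in the bin gap
            lam[t] += kernel * (alpha @ counts[s])
    return lam

rng = np.random.default_rng(0)
counts = rng.poisson(0.3, size=(50, 3))
mu = np.full(3, 0.1)
alpha = 0.2 * rng.random((3, 3))
print(discrete_hawkes_intensity(counts, mu, alpha).shape)   # (50, 3)
```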
-
GCLR AAAI. Rank Refinement: An Algorithmic Framework with Applications to Diversity Aware Influence Maximization. Yadav, Prateek, and Rajkumar, Arun. GCLR AAAI, 2021.
Several real-world problems including network influence maximization, rank aggregation, etc. can be posed as problems that output a good ranking of a set of items. We introduce a unifying algorithmic framework based on a novel notion called rank refinement of monotonic set functions to tackle such problems. We prove that under very mild monotonicity assumptions the proposed algorithm converges to a stable ranking. We show that IMRank, a highly scalable influence maximization algorithm can be derived as a special case of our framework. By careful choice of the monotonic set functions, we derive novel generalizations of IMRank that balances the influence and diversity of the top-ranked nodes without compromising on scalability. We provide extensive experimental analysis on both synthetic data-sets based on stochastic block models and large scale real-world data-sets to demonstrate the efficacy of the proposed framework.
@article{RR, abbr = {GCLR AAAI}, bibtex_show = {true}, selected = {false}, title = {Rank Refinement: An Algorithmic framework with applications to diversity aware influence maximization}, author = {{Yadav}, Prateek and {Rajkumar}, Arun}, journal = {GCLR AAAI}, year = {2021}, pdf = {rr.pdf} }
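One illustrative reading of the rank-refinement loop described above: repeatedly re-score every item with a monotonic set function evaluated against the items currently ranked above it, re-sort, and stop once the ranking is stable. The scoring rule in the sketch below (marginal gain of an item with respect to its current prefix) is an assumed stand-in for the set functions used in the paper, not the paper's exact formulation.

```python
def rank_refine(items, set_fn, max_iters=100):
    """Iteratively refine a ranking using a monotonic set function.

    set_fn(S) should be monotone: adding items to S never decreases its value.
    Each item is scored by its marginal gain w.r.t. the items currently ranked above it.
    """
    ranking = list(items)
    for _ in range(max_iters):
        scores = {}
        for pos, item in enumerate(ranking):
            above = frozenset(ranking[:pos])
            scores[item] = set_fn(above | {item}) - set_fn(above)   # marginal gain
        new_ranking = sorted(ranking, key=lambda it: -scores[it])
        if new_ranking == ranking:                                   # stable ranking reached
            return ranking
        ranking = new_ranking
    return ranking

# Toy monotone coverage function: each item covers a set of elements.
coverage = {"a": {1, 2}, "b": {2, 3, 4}, "c": {4}, "d": {1, 3, 5, 6}}
f = lambda S: len(set().union(*(coverage[i] for i in S))) if S else 0
print(rank_refine(list(coverage), f))
```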
2020
-
CIKM. NHP: Neural Hypergraph Link Prediction. Yadati, Naganand, Nitin, Vikram, Nimishakavi, Madhav, Yadav, Prateek, Louis, Anand, and Talukdar, Partha. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management, 2020.
Link prediction in simple graphs is a fundamental problem in which new links between vertices are predicted based on the observed structure of the graph. However, in many real-world applications, there is a need to model relationships among vertices that go beyond pairwise associations. For example, in a chemical reaction, the relationship among the reactants and products is inherently higher-order. Additionally, there is a need to represent the direction from reactants to products. Hypergraphs provide a natural way to represent such complex higher-order relationships. Graph Convolutional Networks (GCNs) have recently emerged as a powerful deep learning-based approach for link prediction over simple graphs. However, their suitability for link prediction in hypergraphs is underexplored – we fill this gap in this paper and propose Neural Hyperlink Predictor (NHP). NHP adapts GCNs for link prediction in hypergraphs. We propose two variants of NHP – NHP-U and NHP-D – for link prediction over undirected and directed hypergraphs, respectively. To the best of our knowledge, NHP-D is the first-ever method for link prediction over directed hypergraphs. An important feature of NHP is that it can also be used for hyperlinks in which dissimilar vertices interact (e.g. acids reacting with bases). Another attractive feature of NHP is that it can be used to predict unseen hyperlinks at test time (inductive hyperlink prediction). Through extensive experiments on multiple real-world datasets, we show NHP’s effectiveness.
@inproceedings{10.1145/3340531.3411870, abbr = {CIKM}, bibtex_show = {true}, selected = {false}, author = {Yadati, Naganand and Nitin, Vikram and Nimishakavi, Madhav and Yadav, Prateek and Louis, Anand and Talukdar, Partha}, title = {NHP: Neural Hypergraph Link Prediction}, year = {2020}, isbn = {9781450368599}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, pdf = {https://doi.org/10.1145/3340531.3411870}, doi = {10.1145/3340531.3411870}, booktitle = {Proceedings of the 29th ACM International Conference on Information & Knowledge Management}, pages = {1705–1714}, numpages = {10}, keywords = {directed hypergraph, graph neural network, knowledge graph canonicalisation, link prediction}, location = {Virtual Event, Ireland}, series = {CIKM '20} }
2019
-
NeurIPS. HyperGCN: A New Method For Training Graph Convolutional Networks on Hypergraphs. Yadati, Naganand, Nimishakavi, Madhav, Yadav, Prateek, Nitin, Vikram, Louis, Anand, and Talukdar, Partha. In Advances in Neural Information Processing Systems, 2019.
In many real-world network datasets such as co-authorship, co-citation, email communication, etc., relationships are complex and go beyond pairwise. Hypergraphs provide a flexible and natural modeling tool to model such complex relationships. The obvious existence of such complex relationships in many real-world networks naturally motivates the problem of learning with hypergraphs. A popular learning paradigm is hypergraph-based semi-supervised learning (SSL) where the goal is to assign labels to initially unlabeled vertices in a hypergraph. Motivated by the fact that a graph convolutional network (GCN) has been effective for graph-based SSL, we propose HyperGCN, a novel GCN for SSL on attributed hypergraphs. Additionally, we show how HyperGCN can be used as a learning-based approach for combinatorial optimisation on NP-hard hypergraph problems. We demonstrate HyperGCN’s effectiveness through detailed experimentation on real-world hypergraphs. We have made HyperGCN’s source code available to foster reproducible research.
@inproceedings{NEURIPS2019_1efa39bc, abbr = {NeurIPS}, bibtex_show = {true}, selected = {true}, author = {Yadati, Naganand and Nimishakavi, Madhav and Yadav, Prateek and Nitin, Vikram and Louis, Anand and Talukdar, Partha}, booktitle = {Advances in Neural Information Processing Systems}, editor = {Wallach, H. and Larochelle, H. and Beygelzimer, A. and d\textquotesingle Alch\'{e}-Buc, F. and Fox, E. and Garnett, R.}, pages = {}, publisher = {Curran Associates, Inc.}, title = {HyperGCN: A New Method For Training Graph Convolutional Networks on Hypergraphs}, url = {https://proceedings.neurips.cc/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf}, volume = {32}, year = {2019}, arxiv = {1809.02589}, code = {https://github.com/malllabiisc/HyperGCN}, supp = {https://papers.nips.cc/paper/2019/hash/1efa39bcaec6f3900149160693694536-Abstract.html} }
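At the core of HyperGCN is an approximation that, in each layer, replaces every hyperedge with a single pairwise edge between the two vertices that are farthest apart under the current signal, after which standard GCN-style propagation applies. The sketch below implements that idea in its simplest form; the mediator-based refinement and the exact normalization used in the paper are omitted, so treat it as an illustration rather than the reference method.

```python
import numpy as np

def hypergcn_layer(X, hyperedges, W):
    """One simplified HyperGCN-style layer.

    X          : (N, d) node features
    hyperedges : list of lists of node indices
    W          : (d, d_out) layer weights
    """
    N = X.shape[0]
    S = X @ W                                   # signal used to pick representative pairs
    A = np.eye(N)                               # adjacency with self-loops
    for e in hyperedges:
        # Pick the two vertices in the hyperedge that are farthest apart in signal space.
        i, j = max(((i, j) for i in e for j in e if i < j),
                   key=lambda p: np.linalg.norm(S[p[0]] - S[p[1]]))
        A[i, j] = A[j, i] = 1.0
    deg = A.sum(axis=1)
    A_norm = A / np.sqrt(np.outer(deg, deg))    # symmetric normalization
    return np.maximum(A_norm @ S, 0.0)          # ReLU(A_hat X W)

rng = np.random.default_rng(0)
X = rng.normal(size=(6, 4))
hyperedges = [[0, 1, 2], [2, 3, 4, 5], [1, 5]]
W = rng.normal(size=(4, 3))
print(hypergcn_layer(X, hyperedges, W).shape)   # (6, 3)
```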
-
AISTATS. Lovasz Convolutional Networks. Yadav, Prateek, Nimishakavi, Madhav, Yadati, Naganand, Vashishth, Shikhar, Rajkumar, Arun, and Talukdar, Partha. In Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, 2019.
Semi-supervised learning on graph structured data has received significant attention with the recent introduction of Graph Convolutional Networks (GCNs). While traditional methods have focused on optimizing a loss augmented with Laplacian regularization, GCNs perform an implicit Laplacian type regularization to capture local graph structure. In this work, we propose Lovasz Convolutional Networks (LCNs), which are capable of incorporating global graph properties. LCNs achieve this by utilizing Lovasz’s orthonormal embeddings of the nodes. We analyse local and global properties of graphs and demonstrate settings where LCNs tend to work better than GCNs. We validate the proposed method on standard random graph models such as stochastic block models (SBM) and certain community structure based graphs where LCNs outperform GCNs and learn more intuitive embeddings. We also perform extensive binary and multi-class classification experiments on real world datasets to demonstrate LCN’s effectiveness. In addition to simple graphs, we also demonstrate the use of LCNs on hyper-graphs by identifying settings where they are expected to work better than GCNs.
@inproceedings{pmlr-v89-yadav19a, abbr = {AISTATS}, bibtex_show = {true}, selected = {true}, code = {https://github.com/malllabiisc/lcn}, poster = {https://drive.google.com/file/d/1j78XF6FpDp11Ubo1YI-wFIBV-52d2HZ1/view?usp=sharing}, arxiv = {1805.11365}, title = {Lovasz Convolutional Networks}, author = {Yadav, Prateek and Nimishakavi, Madhav and Yadati, Naganand and Vashishth, Shikhar and Rajkumar, Arun and Talukdar, Partha}, booktitle = {Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics}, pages = {1978--1987}, year = {2019}, editor = {Chaudhuri, Kamalika and Sugiyama, Masashi}, volume = {89}, series = {Proceedings of Machine Learning Research}, month = {16--18 Apr}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v89/yadav19a/yadav19a.pdf}, url = {https://proceedings.mlr.press/v89/yadav19a.html} }
-
ACL. Incorporating Syntactic and Semantic Information in Word Embeddings using Graph Convolutional Networks. Vashishth, Shikhar, Yadav, Prateek*, Bhandari, Manik*, Rai, Piyush, Bhattacharyya, Chiranjib, and Talukdar, Partha. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019.
Word embeddings have been widely adopted across several NLP applications. Most existing word embedding methods utilize sequential context of a word to learn its embedding. While there have been some attempts at utilizing syntactic context of a word, such methods result in an explosion of the vocabulary size. In this paper, we overcome this problem by proposing SynGCN, a flexible Graph Convolution based method for learning word embeddings. SynGCN utilizes the dependency context of a word without increasing the vocabulary size. Word embeddings learned by SynGCN outperform existing methods on various intrinsic and extrinsic tasks and provide an advantage when used with ELMo. We also propose SemGCN, an effective framework for incorporating diverse semantic knowledge for further enhancing learned word representations. We make the source code of both models available to encourage reproducible research.
@inproceedings{vashishth-etal-2019-incorporating, abbr = {ACL}, bibtex_show = {true}, selected = {false}, arxiv = {1809.04283}, title = {Incorporating Syntactic and Semantic Information in Word Embeddings using Graph Convolutional Networks}, author = {Vashishth, Shikhar and Yadav, Prateek* and Bhandari, Manik* and Rai, Piyush and Bhattacharyya, Chiranjib and Talukdar, Partha}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics}, month = jul, year = {2019}, address = {Florence, Italy}, publisher = {Association for Computational Linguistics}, url = {https://aclanthology.org/P19-1320}, doi = {10.18653/v1/P19-1320}, pages = {3308--3318}, code = {https://github.com/malllabiisc/WordGCN}, supp = {https://drive.google.com/file/d/18ZUlrAF1kvvG7LaZcet20tSZZwlLVLds/view?usp=sharing}, poster = {https://drive.google.com/file/d/14DMn5d2r06lvruJkpEo2TsVgwyqxQBLz/view?usp=sharing} }
-
AISTATS. Confidence-based Graph Convolutional Networks for Semi-Supervised Learning. Vashishth, Shikhar*, Yadav, Prateek*, Bhandari, Manik, and Talukdar, Partha. In Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, 2019.
Predicting properties of nodes in a graph is an important problem with applications in a variety of domains. Graph-based Semi-Supervised Learning (SSL) methods aim to address this problem by labeling a small subset of the nodes as seeds, and then utilizing the graph structure to predict label scores for the rest of the nodes in the graph. Recently, Graph Convolutional Networks (GCNs) have achieved impressive performance on the graph-based SSL task. In addition to label scores, it is also desirable to have confidence scores associated with them. Unfortunately, confidence estimation in the context of GCNs has not been previously explored. We fill this important gap in this paper and propose ConfGCN, which estimates label scores along with their confidences jointly in a GCN-based setting. ConfGCN uses these estimated confidences to determine the influence of one node on another during neighborhood aggregation, thereby acquiring anisotropic capabilities. Through extensive analysis and experiments on standard benchmarks, we find that ConfGCN is able to outperform state-of-the-art baselines. We have made ConfGCN’s source code available to encourage reproducible research.
@inproceedings{pmlr-v89-vashishth19a, abbr = {AISTATS}, bibtex_show = {false}, selected = {false}, arxiv = {1901.08255}, code = {https://github.com/malllabiisc/ConfGCN}, title = {Confidence-based Graph Convolutional Networks for Semi-Supervised Learning}, author = {Vashishth, Shikhar* and Yadav, Prateek* and Bhandari, Manik and Talukdar, Partha}, booktitle = {Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics}, pages = {1792--1801}, year = {2019}, editor = {Chaudhuri, Kamalika and Sugiyama, Masashi}, volume = {89}, series = {Proceedings of Machine Learning Research}, month = {16--18 Apr}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v89/vashishth19a/vashishth19a.pdf}, url = {https://proceedings.mlr.press/v89/vashishth19a.html} }
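The confidence-weighted aggregation in ConfGCN can be pictured as follows: each node carries label scores and per-label uncertainties, and a neighbor's influence shrinks as the uncertainty-weighted distance between the two nodes' label estimates grows. The sketch below is an illustrative rendering of that mechanism with assumed shapes and a simplified weighting; it is not the paper's exact update equations (the official code is linked in the entry).

```python
import numpy as np

def confidence_weighted_aggregate(H, mu, sigma, adj, eps=1e-6):
    """One confidence-weighted aggregation step.

    H     : (N, d) node features
    mu    : (N, L) estimated label scores per node
    sigma : (N, L) per-label variances (smaller = more confident)
    adj   : (N, N) binary adjacency without self-loops
    """
    N = H.shape[0]
    out = np.zeros_like(H)
    for v in range(N):
        nbrs = np.nonzero(adj[v])[0]
        if nbrs.size == 0:
            out[v] = H[v]
            continue
        # Confidence-weighted distance between the label estimates of v and each neighbor.
        d = ((mu[nbrs] - mu[v]) ** 2 * (1.0 / (sigma[nbrs] + eps) + 1.0 / (sigma[v] + eps))).sum(axis=1)
        r = 1.0 / (d + eps)              # influence shrinks as the distance grows
        out[v] = (r / r.sum()) @ H[nbrs]
    return out

rng = np.random.default_rng(0)
N, d, L = 5, 8, 3
adj = (rng.random((N, N)) > 0.5).astype(float)
adj = ((adj + adj.T) > 0).astype(float)
np.fill_diagonal(adj, 0.0)
H, mu, sigma = rng.normal(size=(N, d)), rng.random((N, L)), rng.random((N, L))
print(confidence_weighted_aggregate(H, mu, sigma, adj).shape)   # (5, 8)
```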