Tom Hosking

Email hi@tomho.sk

tl;dr

I'm a Member of Technical Staff at Cohere, working on model merging. Before that I did my PhD in NLP at Edinburgh, supervised by Mirella Lapata.

Recent Work

Publications

Hierarchical Indexing for Retrieval-Augmented Opinion Summarization Tom Hosking, Hao Tang and Mirella Lapata
TACL 2024
BibTeX · Code

@article{10.1162/tacl_a_00703,
    author = {Hosking, Tom and Tang, Hao and Lapata, Mirella},
    title = {Hierarchical Indexing for Retrieval-Augmented Opinion Summarization},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {12},
    pages = {1533--1555},
    year = {2024},
    month = nov,
    abstract = {We propose a method for unsupervised abstractive opinion summarization, that combines the attributability and scalability of extractive approaches with the coherence and fluency of Large Language Models (LLMs). Our method, HIRO, learns an index structure that maps sentences to a path through a semantically organized discrete hierarchy. At inference time, we populate the index and use it to identify and retrieve clusters of sentences containing popular opinions from input reviews. Then, we use a pretrained LLM to generate a readable summary that is grounded in these extracted evidential clusters. The modularity of our approach allows us to evaluate its efficacy at each stage. We show that HIRO learns an encoding space that is more semantically structured than prior work, and generates summaries that are more representative of the opinions in the input reviews. Human evaluation confirms that HIRO generates significantly more coherent, detailed, and accurate summaries.},
    issn = {2307-387X},
    doi = {10.1162/tacl_a_00703},
    url = {https://doi.org/10.1162/tacl\_a\_00703},
    eprint = {https://direct.mit.edu/tacl/article-pdf/doi/10.1162/tacl\_a\_00703/2482018/tacl\_a\_00703.pdf},
}

Human Feedback is not Gold Standard Tom Hosking, Phil Blunsom and Max Bartolo
ICLR 2024
BibTeX · Code

@misc{hosking2023human,
    title = {Human Feedback is not Gold Standard},
    author = {Hosking, Tom and Blunsom, Phil and Bartolo, Max},
    year = {2023},
    eprint = {2309.16349},
    archivePrefix = {arXiv},
    primaryClass = {cs.CL},
    note = {Published at ICLR 2024},
}

Optimal Transport Posterior Alignment for Cross-lingual Semantic Parsing • Tom Sherborne, Tom Hosking, and Mirella Lapata
TACL 2023
BibTeX · Code

@article{10.1162/tacl_a_00611,
    author = {Sherborne, Tom and Hosking, Tom and Lapata, Mirella},
    title = {Optimal Transport Posterior Alignment for Cross-lingual Semantic Parsing},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {11},
    pages = {1432--1450},
    year = {2023},
    month = nov,
    issn = {2307-387X},
    doi = {10.1162/tacl_a_00611},
    url = {https://doi.org/10.1162/tacl\_a\_00611},
    eprint = {https://direct.mit.edu/tacl/article-pdf/doi/10.1162/tacl\_a\_00611/2184055/tacl\_a\_00611.pdf},
}

Attributable and Scalable Opinion Summarization Tom Hosking, Hao Tang and Mirella Lapata
ACL 2023
BibTeX · Code

@inproceedings{hosking-etal-2023-attributable,
    title = {Attributable and Scalable Opinion Summarization},
    author = {Hosking, Tom and Tang, Hao and Lapata, Mirella},
    booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    month = jul,
    year = {2023},
    address = {Toronto, Canada},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2023.acl-long.473},
    pages = {8488--8505},
}

Hierarchical Sketch Induction for Paraphrase Generation Tom Hosking, Hao Tang and Mirella Lapata
ACL 2022
BibTeX · Code

@inproceedings{hosking-etal-2022-hierarchical,
    title = {Hierarchical Sketch Induction for Paraphrase Generation},
    author = {Hosking, Tom and Tang, Hao and Lapata, Mirella},
    booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    month = may,
    year = {2022},
    address = {Dublin, Ireland},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2022.acl-long.178},
    pages = {2489--2501},
}

Factorising Meaning and Form for Intent-Preserving Paraphrasing Tom Hosking and Mirella Lapata
ACL 2021
BibTeX · Code

@inproceedings{hosking-lapata-2021-factorising,
    title = {Factorising Meaning and Form for Intent-Preserving Paraphrasing},
    author = {Hosking, Tom and Lapata, Mirella},
    booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
    month = aug,
    year = {2021},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2021.acl-long.112},
    pages = {1405--1418},
    abstract = {We propose a method for generating paraphrases of English questions that retain the original intent but use a different surface form. Our model combines a careful choice of training objective with a principled information bottleneck, to induce a latent encoding space that disentangles meaning and form. We train an encoder-decoder model to reconstruct a question from a paraphrase with the same meaning and an exemplar with the same surface form, leading to separated encoding spaces. We use a Vector-Quantized Variational Autoencoder to represent the surface form as a set of discrete latent variables, allowing us to use a classifier to select a different surface form at test time. Crucially, our method does not require access to an external source of target exemplars. Extensive experiments and a human evaluation show that we are able to generate paraphrases with a better tradeoff between semantic preservation and syntactic novelty compared to previous methods.},
}

Querent Intent in Multi-Sentence Questions • Laurie Burchell*, Jie Chi*, Tom Hosking*, Nina Markl* and Bonnie Webber
Linguistics Annotation Workshop, COLING 2020 (* = equal contribution)
BibTeX · Code · Dataset

@inproceedings{burchell-etal-2020-querent,
    title = {Querent Intent in Multi-Sentence Questions},
    author = {Burchell, Laurie and Chi, Jie and Hosking, Tom and Markl, Nina and Webber, Bonnie},
    booktitle = {Proceedings of the 14th Linguistic Annotation Workshop},
    month = dec,
    year = {2020},
    address = {Barcelona, Spain},
    publisher = {Association for Computational Linguistics},
    url = {https://www.aclweb.org/anthology/2020.law-1.13},
    pages = {138--147},
    abstract = {Multi-sentence questions (MSQs) are sequences of questions connected by relations which, unlike sequences of standalone questions, need to be answered as a unit. Following Rhetorical Structure Theory (RST), we recognise that different {``}question discourse relations{''} between the subparts of MSQs reflect different speaker intents, and consequently elicit different answering strategies. Correctly identifying these relations is therefore a crucial step in automatically answering MSQs. We identify five different types of MSQs in English, and define five novel relations to describe them. We extract over 162,000 MSQs from Stack Exchange to enable future research. Finally, we implement a high-precision baseline classifier based on surface features.},
}

Evaluating Rewards for Question Generation Models • Tom Hosking and Sebastian Riedel
NAACL 2019
BibTeX · Code

@inproceedings{hosking-riedel-2019-evaluating,
    title = "Evaluating Rewards for Question Generation Models",
    author = "Hosking, Tom  and
        Riedel, Sebastian",
    booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/N19-1237",
    pages = "2278--2283",
}

Links