diff --git a/assets/boston.yaml b/assets/boston.yaml
index 0bbfaaa9..adec6753 100644
--- a/assets/boston.yaml
+++ b/assets/boston.yaml
@@ -22,3 +22,24 @@
   prohibited_uses: ''
   monitoring: ''
   feedback: https://huggingface.co/garage-bAInd/Platypus2-13B/discussions
+- type: model
+  name: UFOGen
+  organization: Boston University
+  description: UFOGen is a novel generative model designed for ultra-fast, one-step text-to-image synthesis.
+  created_date: 2023-11-14
+  url: https://arxiv.org/pdf/2311.09257.pdf
+  model_card: none
+  modality: text; image
+  analysis: UFOGen is evaluated on standard image benchmarks against other models fine-tuned with Stable Diffusion.
+  size: 900M parameters (dense)
+  dependencies: [Stable Diffusion]
+  training_emissions: unknown
+  training_time: unknown
+  training_hardware: unknown
+  quality_control: ''
+  access: open
+  license: unknown
+  intended_uses: ''
+  prohibited_uses: ''
+  monitoring: ''
+  feedback: none
diff --git a/assets/deepmind.yaml b/assets/deepmind.yaml
index 45d0fac7..32aba2e1 100644
--- a/assets/deepmind.yaml
+++ b/assets/deepmind.yaml
@@ -639,3 +639,24 @@
   prohibited_uses: ''
   monitoring: ''
   feedback: ''
+- type: model
+  name: Lyria
+  organization: DeepMind
+  description: Lyria is DeepMind's most advanced AI music generation model to date.
+  created_date: 2023-11-16
+  url: https://deepmind.google/discover/blog/transforming-the-future-of-music-creation/
+  model_card: none
+  modality: text; music
+  analysis: unknown
+  size: unknown
+  dependencies: []
+  training_emissions: unknown
+  training_time: unknown
+  training_hardware: unknown
+  quality_control: worked with artists and the music industry to ensure utility
+  access: limited
+  license: unknown
+  intended_uses: ''
+  prohibited_uses: ''
+  monitoring: ''
+  feedback: ''
diff --git a/assets/meta.yaml b/assets/meta.yaml
index 79174172..203f8af9 100644
--- a/assets/meta.yaml
+++ b/assets/meta.yaml
@@ -694,6 +694,56 @@
   prohibited_uses: ''
   monitoring: ''
   feedback: ''
+
+- type: model
+  name: Emu Video
+  organization: Meta
+  description: Emu Video is a text-to-video generation model that factorizes the
+    generation into two steps, first generating an image conditioned on the text,
+    and then generating a video conditioned on the text and the generated image.
+  created_date: 2023-11-16
+  url: https://emu-video.metademolab.com/
+  model_card: none
+  modality: text; video
+  analysis: Analyzed against a nearest-neighbor model baseline and by extending
+    the video length.
+  size: 6B parameters (dense)
+  dependencies: [Emu, CLIP, T5]
+  training_emissions: unknown
+  training_time: unknown
+  training_hardware: unknown
+  quality_control: ''
+  access: limited
+  license: unknown
+  intended_uses: ''
+  prohibited_uses: ''
+  monitoring: ''
+  feedback: none
+
+- type: model
+  name: Emu Edit
+  organization: Meta
+  description: Emu Edit is a multi-task image editing model that sets state-of-the-art
+    results in instruction-based image editing.
+  created_date: 2023-11-16
+  url: https://emu-edit.metademolab.com/
+  model_card: none
+  modality: text; image
+  analysis: Evaluated on a test set of actions in comparison to SoTA image editing
+    models.
+  size: unknown
+  dependencies: [Emu, CLIP, T5]
+  training_emissions: unknown
+  training_time: unknown
+  training_hardware: unknown
+  quality_control: ''
+  access: limited
+  license: unknown
+  intended_uses: ''
+  prohibited_uses: ''
+  monitoring: ''
+  feedback: none
+
 - type: model
   name: MetaCLIP
   organization: Meta
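
All three entries share one flat schema. As a sanity check on additions like these, here is a minimal loader/validator sketch. It assumes PyYAML is installed, that each assets file is a top-level YAML list, and that the field set below mirrors the entries in this diff; it is illustrative tooling, not the repository's own validation.

```python
import yaml

# Field set copied from the entries added in this diff (assumed schema).
REQUIRED_FIELDS = {
    "type", "name", "organization", "description", "created_date", "url",
    "model_card", "modality", "analysis", "size", "dependencies",
    "training_emissions", "training_time", "training_hardware",
    "quality_control", "access", "license", "intended_uses",
    "prohibited_uses", "monitoring", "feedback",
}

def validate_assets(path: str) -> None:
    """Raise if any entry in an assets YAML file is missing a schema field."""
    with open(path) as f:
        entries = yaml.safe_load(f)
    for entry in entries:
        missing = REQUIRED_FIELDS - entry.keys()
        if missing:
            raise ValueError(f"{entry.get('name', '?')}: missing {sorted(missing)}")

for path in ("assets/boston.yaml", "assets/deepmind.yaml", "assets/meta.yaml"):
    validate_assets(path)
```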
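The Emu Video entry describes a two-step factorization: generate an image from the text, then generate a video from the text plus that image. The sketch below only illustrates that control flow; the stage functions are hypothetical placeholders (NumPy noise stands in for real diffusion sampling) and are not Meta's API.

```python
import numpy as np

def text_to_image(prompt: str) -> np.ndarray:
    # Placeholder for stage 1: an image conditioned on the text.
    rng = np.random.default_rng(0)
    return rng.random((512, 512, 3))

def text_image_to_video(prompt: str, image: np.ndarray, frames: int = 16) -> np.ndarray:
    # Placeholder for stage 2: a video conditioned on the text and the image.
    # Repeating the image makes the conditioning explicit; a real model would
    # produce temporally coherent frames instead.
    return np.stack([image] * frames)

prompt = "a dog surfing a wave"
video = text_image_to_video(prompt, text_to_image(prompt))
print(video.shape)  # (16, 512, 512, 3)
```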