Skip to content

Commit

Permalink
update pub
Browse files Browse the repository at this point in the history
  • Loading branch information
Zhishuai Zhang authored and Zhishuai Zhang committed Jul 1, 2024
1 parent a4a1817 commit 07b5e25
Show file tree
Hide file tree
Showing 9 changed files with 120 additions and 76 deletions.
6 changes: 0 additions & 6 deletions data/tu2023unicorns.bib

This file was deleted.

6 changes: 6 additions & 0 deletions data/tu2024unicorns.bib
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
@inproceedings{tu2024unicorns,
  title     = {How Many Unicorns Are in This Image? A Safety Evaluation Benchmark for Vision {LLMs}},
  author    = {Tu, Haoqin and Cui, Chenhang and Wang, Zijun and Zhou, Yiyang and Zhao, Bingchen and Han, Junlin and Zhou, Wangchunshu and Yao, Huaxiu and Xie, Cihang},
  booktitle = {ECCV},
  year      = {2024}
}
6 changes: 0 additions & 6 deletions data/xiao2023semantic.bib

This file was deleted.

6 changes: 6 additions & 0 deletions data/xiao2024semantic.bib
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
@inproceedings{xiao2024semantic,
  title     = {A Semantic Space is Worth 256 Language Descriptions: Make Stronger Segmentation Models with Descriptive Properties},
  author    = {Xiao, Junfei and Zhou, Ziqi and Li, Wenxuan and Lan, Shiyi and Mei, Jieru and Yu, Zhiding and Zhao, Bingchen and Yuille, Alan and Zhou, Yuyin and Xie, Cihang},
  booktitle = {ECCV},
  year      = {2024}
}
6 changes: 6 additions & 0 deletions data/xie2024lgformer.bib
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
@inproceedings{xie2024lgformer,
  title     = {From Pixels to Objects: A Hierarchical Approach for Part and Object Segmentation Using Local and Global Aggregation},
  author    = {Xie, Yunfei and Xie, Cihang and Yuille, Alan and Mei, Jieru},
  booktitle = {ECCV},
  year      = {2024}
}
File renamed without changes
File renamed without changes
Binary file added images/xie2024lgformer.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
166 changes: 102 additions & 64 deletions index.html
Original file line number Diff line number Diff line change
Expand Up @@ -102,8 +102,12 @@
<!-- <li><strong>[<font color="red">February 2023</font>]</strong> I will be giving a talk in <a href="https://practical-dl.github.io/">AAAI 2023 2nd International Workshop on Practical Deep Learning in the Wild</a>.</li>
-->



<li><strong>[<font color="red">NEW</font>]</strong> We release <a href="https://www.haqtu.me/Recap-Datacomp-1B/">Recap-DataComp-1B</a>, where we use a LLaMA-3-powered LLaVA model to recaption the entire 1.3 billion images from DataComp-1B. Our Recap-DataComp-1B shows higher textual quality, and can help to train stronger CLIP models and T2I models.</li>

<li><strong>[July 2024]</strong> Three papers are accepted by ECCV 2024.</li>

<li><strong>[May 2024]</strong> Congratulations to <a href="https://xhl-video.github.io/xianhangli/">Xianhang Li</a> on winning the Jack Baskin & Peggy Downes-Baskin Fellowship. Additionally, D-iGPT is accepted by ICML 2024 --- our strongest model secures an ImageNet top-1 accuracy of <strong>90.0%</strong> with ViT-H.</li>

<li><strong>[April 2024]</strong> We release <a href="https://thefllood.github.io/HQEdit_web/">HQ-Edit</a>, a dataset with high-resolution images & detailed and aligned editing instructions. Our fine-tuned InstructPix2Pix delivers superior editing performance. Additionally, two papers are accepted by TMLR.</li>
Expand Down Expand Up @@ -511,7 +515,102 @@ <h4><u>2024</u></h4>
</tr> <!--ren2024digpt-->


<!-- Publication row: Tu et al., ECCV 2024 (VLLM safety benchmark) -->
<tr bgcolor="#ffffd0">
<td style="padding:20px;width:35%;vertical-align:middle">
<img src='images/tu2024unicorns.png' width="250">
</td>
<td width="75%" valign="middle">
<p>
<a href="https://arxiv.org/abs/2311.16101">
<papertitle>How Many Unicorns Are in This Image? A Safety Evaluation Benchmark for Vision LLMs</papertitle>
</a>
<br>
<a href="https://scholar.google.com/citations?user=hyFMd54AAAAJ&hl">Haoqin Tu</a>,
<a href="https://scholar.google.com/citations?user=V5X1gdAAAAAJ">Chenhang Cui</a>,
<a href="https://asillycat.github.io/">Zijun Wang</a>,
<a href="https://scholar.google.com/citations?user=6KltFMAAAAAJ">Yiyang Zhou</a>,
<a href="https://bzhao.me/">Bingchen Zhao</a>,
<a href="https://junlinhan.github.io/">Junlin Han</a>,
<a href="https://michaelzhouwang.github.io/">Wangchunshu Zhou</a>,
<a href="https://www.huaxiuyao.io/">Huaxiu Yao</a>,
<strong>Cihang Xie</strong>
<br>
<em>ECCV</em>, 2024
<br>
</p>
<div class="paper" id="tu2024unicorns">
<a href="https://arxiv.org/pdf/2311.16101.pdf">pdf</a> /
<a href="https://github.com/UCSC-VLAA/vllm-safety-benchmark">project page</a> /
<a href="data/tu2024unicorns.bib">bibtex</a>
</div>
<br>
</td>
</tr> <!--tu2024unicorns-->


<!-- Publication row: Xiao et al., ECCV 2024 (ProLab segmentation) -->
<tr bgcolor="#ffffd0">
<td style="padding:20px;width:35%;vertical-align:middle">
<img src='images/xiao2024semantic.png' width="250">
</td>
<td width="75%" valign="middle">
<p>
<a href="https://arxiv.org/abs/2312.13764">
<papertitle>A Semantic Space is Worth 256 Language Descriptions: Make Stronger Segmentation Models with Descriptive Properties</papertitle>
</a>
<br>
<a href="https://scholar.google.com/citations?user=rv-aTqkAAAAJ">Junfei Xiao</a>,
<a href="https://zzzqzhou.github.io/">Ziqi Zhou</a>,
<a href="https://scholar.google.com/citations?user=tpNZM2YAAAAJ">Wenxuan Li</a>,
<a href="https://voidrank.github.io/">Shiyi Lan</a>,
<a href="https://meijieru.com/">Jieru Mei</a>,
<a href="https://scholar.google.com/citations?user=1VI_oYUAAAAJ">Zhiding Yu</a>,
<a href="https://bzhao.me/">Bingchen Zhao</a>,
<a href="http://www.cs.jhu.edu/~ayuille/">Alan Yuille</a>,
<a href="https://yuyinzhou.github.io/">Yuyin Zhou</a>,
<strong>Cihang Xie</strong>
<br>
<em>ECCV</em>, 2024
<br>
</p>
<div class="paper" id="xiao2024semantic">
<a href="https://arxiv.org/pdf/2312.13764.pdf">pdf</a> /
<a href="https://github.com/lambert-x/ProLab">project page</a> /
<a href="data/xiao2024semantic.bib">bibtex</a>
</div>
<br>
</td>
</tr> <!--xiao2024semantic-->



<!-- Publication row: Xie et al., ECCV 2024 (LGFormer segmentation) -->
<tr bgcolor="#ffffd0">
<td style="padding:20px;width:35%;vertical-align:middle">
<img src='images/xie2024lgformer.png' width="250">
</td>
<td width="75%" valign="middle">
<p>
<!-- TODO: wrap the title in <a href="..."> once an arXiv/PDF URL is available;
     an empty href="" would link back to this page. -->
<papertitle>From Pixels to Objects: A Hierarchical Approach for Part and Object Segmentation Using Local and Global Aggregation</papertitle>
<br>
<a href="https://github.com/yunfeixie233">Yunfei Xie</a>,
<strong>Cihang Xie</strong>,
<a href="http://www.cs.jhu.edu/~ayuille/">Alan Yuille</a>,
<a href="https://meijieru.com/">Jieru Mei</a>
<br>
<em>ECCV</em>, 2024
<br>
</p>
<div class="paper" id="xie2024lgformer">
<!-- TODO: restore the "pdf" link when a paper URL exists -->
<a href="data/xie2024lgformer.bib">bibtex</a>
</div>
<br>
</td>
</tr> <!--xie2024lgformer-->


<tr bgcolor="#ffffd0">
<td style="padding:20px;width:35%;vertical-align:middle">
<img src='images/ren2024arm.png' width="250"></div>
</td>
Expand Down Expand Up @@ -1071,38 +1170,7 @@ <h4><u>2023</u></h4>



<tr bgcolor="#ffffd0">
<td style="padding:20px;width:35%;vertical-align:middle">
<img src='images/xiao2023semantic.png' width="250"></div>
</td>
<td width="75%" valign="middle">
<p>
<a href="https://arxiv.org/abs/2312.13764">
<papertitle>A Semantic Space is Worth 256 Language Descriptions: Make Stronger Segmentation Models with Descriptive Properties</papertitle>
</a>
<br>
<a href="https://scholar.google.com/citations?user=rv-aTqkAAAAJ">Junfei Xiao</a>,
<a href="https://zzzqzhou.github.io/">Ziqi Zhou</a>,
<a href="https://scholar.google.com/citations?user=tpNZM2YAAAAJ">Wenxuan Li</a>,
<a href="https://voidrank.github.io/">Shiyi Lan</a>,
<a href="https://meijieru.com/">Jieru Mei</a>,
<a href="https://scholar.google.com/citations?user=1VI_oYUAAAAJ">Zhiding Yu</a>,
<a href="https://bzhao.me/">Bingchen Zhao</a>,
<a href="http://www.cs.jhu.edu/~ayuille/">Alan Yuille</a>,
<a href="https://yuyinzhou.github.io/">Yuyin Zhou</a>,
<strong>Cihang Xie</strong>
<br>
<em>arxiv</em>, 2023
<br>
</p>
<div class="paper" id="xiao2023semantic">
<a href="https://arxiv.org/pdf/2312.13764.pdf">pdf</a> /
<a href="https://github.com/lambert-x/ProLab">project page</a> /
<a href="data/xiao2023semantic.bib">bibtex</a>
</div>
<br>
</td>
</tr> <!--xiao2023semantic-->




Expand Down Expand Up @@ -1162,37 +1230,7 @@ <h4><u>2023</u></h4>
</tr> <!--zhang2023compress-->


<tr bgcolor="#ffffd0">
<td style="padding:20px;width:35%;vertical-align:middle">
<img src='images/tu2023unicorns.png' width="250"></div>
</td>
<td width="75%" valign="middle">
<p>
<a href="https://arxiv.org/abs/2311.16101">
<papertitle>How Many Unicorns Are in This Image? A Safety Evaluation Benchmark for Vision LLMs</papertitle>
</a>
<br>
<a href="https://scholar.google.com/citations?user=hyFMd54AAAAJ&hl">Haoqin Tu</a>,
<a href="https://scholar.google.com/citations?user=V5X1gdAAAAAJ">Chenhang Cui</a>,
<a href="https://asillycat.github.io/">Zijun Wang</a>,
<a href="https://scholar.google.com/citations?user=6KltFMAAAAAJ">Yiyang Zhou</a>,
<a href="https://bzhao.me/">Bingchen Zhao</a>,
<a href="https://junlinhan.github.io/">Junlin Han</a>,
<a href="https://michaelzhouwang.github.io/">Wangchunshu Zhou</a>,
<a href="https://www.huaxiuyao.io/">Huaxiu Yao</a>,
<strong>Cihang Xie</strong>
<br>
<em>arxiv</em>, 2023
<br>
</p>
<div class="paper" id="tu2023unicorns">
<a href="https://arxiv.org/pdf/2311.16101.pdf">pdf</a> /
<a href="https://github.com/UCSC-VLAA/vllm-safety-benchmark">project page</a> /
<a href="data/tu2023unicorns.bib">bibtex</a>
</div>
<br>
</td>
</tr> <!--tu2023unicorns-->




Expand Down

0 comments on commit 07b5e25

Please sign in to comment.