From 7ed017559b74354b318f2197fb2ca742a9439e7b Mon Sep 17 00:00:00 2001
From: zhurui <274461951@qq.com>
Date: Mon, 17 Jun 2024 14:04:28 +0800
Subject: [PATCH] commit
---
.gitignore | 4 +
README.md | 0
main.py | 176 ++
method3_dict.txt | 20 +
result.json | 1 +
result1.json | 518 ++++++
result_arxiv_knowledge_graph.json | 1 +
t1.py | 14 +
te_u/arxiv.py | 150 ++
te_u/paper_down_load/csv/ECCV_2022.csv | 1646 +++++++++++++++++
te_u/paper_down_load/eccv_download.py | 658 +++++++
te_u/paper_down_load/pdf_show.py | 9 +
te_u/paper_down_load/pdf_show2.py | 64 +
.../urls/init_url_ECCV_2022.dat | Bin 0 -> 2299323 bytes
te_u/result_arxiv_knowledge_graph.json | 32 +
temp.py | 2 +
test_textrank_en.py | 160 ++
test_textrank_zh.py | 18 +
utils.py | 157 ++
小实验/t.json | 168 ++
小实验/网页访问gpt-4.py | 129 ++
小实验/网页访问gpt-4——上传文件.py | 137 ++
...息爬取(题目、期刊、日期、摘要、关键词)_1.py | 282 +++
23 files changed, 4346 insertions(+)
create mode 100644 .gitignore
create mode 100644 README.md
create mode 100644 main.py
create mode 100644 method3_dict.txt
create mode 100644 result.json
create mode 100644 result1.json
create mode 100644 result_arxiv_knowledge_graph.json
create mode 100644 t1.py
create mode 100644 te_u/arxiv.py
create mode 100644 te_u/paper_down_load/csv/ECCV_2022.csv
create mode 100644 te_u/paper_down_load/eccv_download.py
create mode 100644 te_u/paper_down_load/pdf_show.py
create mode 100644 te_u/paper_down_load/pdf_show2.py
create mode 100644 te_u/paper_down_load/urls/init_url_ECCV_2022.dat
create mode 100644 te_u/result_arxiv_knowledge_graph.json
create mode 100644 temp.py
create mode 100644 test_textrank_en.py
create mode 100644 test_textrank_zh.py
create mode 100644 utils.py
create mode 100644 小实验/t.json
create mode 100644 小实验/网页访问gpt-4.py
create mode 100644 小实验/网页访问gpt-4——上传文件.py
create mode 100644 论文信息爬取(题目、期刊、日期、摘要、关键词)_1.py
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..98c62e4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+__pycache__
+.idea
+paper_download/
+te_u/paper_down_load/ECCV_2022/
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..5e350da
--- /dev/null
+++ b/main.py
@@ -0,0 +1,176 @@
+import gradio as gr
+import os
+
+from te_u.arxiv import get_news_from_arxiv
+#
+# os.environ['http_proxy'] = '127.0.0.1:7890'
+# os.environ['https_proxy'] = '127.0.0.1:7890'
+
+from utils import get_news, get_clouds
+from gradio_pdf import PDF
+
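+# Shared state for the Gradio callbacks: the PDF currently shown, every fetched paper, and the marked subset.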
+current_pdf_file = None
+news = []
+choose_news = []
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column(scale=20):
+            gr.HTML("""
+科研情报
+""")
+        with gr.Column(scale=1, min_width=100):
+            gr.HTML(
+                """
+"""
+            )
+            gr.HTML(
+                """ Created by 朱瑞
+"""
+            )
+
+ with gr.Tabs(elem_classes="tab-buttons") as tabs:
+ with gr.TabItem("科研文献分析"):
+ with gr.Row():
+ with gr.Accordion("文献采集区", open=True, ) as area_news_get_fn:
+ keywords = gr.Dropdown(choices=["对抗攻击", "knowledge graph", "认知智能与先进计算", "电磁空间感知与利用", "信息安全与攻防博弈"],
+ value="对抗攻击", label="关键词", show_label=True)
+ source = gr.Dropdown(choices=["知网", "arxiv"], value="知网", label="数据源", show_label=True)
+ num = gr.Slider(1, 100, value=10, label="采集条数", step=1)
+ news_get = gr.Button("获取论文", variant='primary')
+
+ with gr.Row():
+ with gr.Accordion("文献标记分析区", open=True, elem_id="news-panel") as news_get_fn:
+ chosen_news = gr.CheckboxGroup(choices=[item['name'] for item in news], label="需要进行操作的文献")
+
+ with gr.Row():
+ news_mark = gr.Button("标记文献")
+ news_all_mark = gr.Button("全部标记", variant='primary')
+
+
+ def recover_news_by_choose(news_titles):
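+                # Map the checked titles back to their full records in the global news list.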
+ select_news = []
+ global news
+
+ for news_title in news_titles:
+ for i in news:
+ if news_title == i['name']:
+ new_i = i
+ select_news.append(new_i)
+ break
+
+ return select_news
+
+
+ def mark_new(titles):
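+                # Remember the records matching the checked titles in the global choose_news list.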
+ global choose_news
+ mark_news = recover_news_by_choose(titles)
+ choose_news = mark_news
+
+
+ def get_news_temp(num, keywords, source):
+ """ 获取临时的文献 """
+ global news
+ results = []
+ if source == "知网":
+ results = get_news(num, keywords)
+ elif source == "arxiv":
+ results = get_news_from_arxiv(num, keywords)
+
+ news.extend(results)
+ return gr.CheckboxGroup(choices=[item['name'] for item in news], label="需要进行操作的文献")
+
+
+ def mark_all_new():
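+                # Mark every fetched paper and tick all of the checkboxes.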
+ global news
+ global choose_news
+ choose_news = news
+ return gr.CheckboxGroup(choices=[item['name'] for item in news], value=[item['name'] for item in news], label="需要进行操作的文献")
+
+
+ news_get.click(get_news_temp, inputs=[num, keywords, source], outputs=[chosen_news])
+ news_mark.click(mark_new, inputs=[chosen_news])
+ news_all_mark.click(mark_all_new, outputs=[chosen_news])
+
+ with gr.TabItem("科研文献获取"):
+ with gr.Row():
+ with gr.Accordion("功能区", open=True, ) as area_news_analyse_fn:
+ with gr.Row():
+ ci_yun_by_title = gr.Button("题目词云", variant='primary')
+ ci_yun_by_abstract = gr.Button("摘要词云", variant='primary')
+ with gr.Row():
+ with gr.Accordion("结果展示区", open=True, ) as area_news_result_fn:
+ result_place = gr.Image()
+
+
+ def g_ci_yun_by_title():
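+                # Build a word-cloud image from the titles of the marked papers.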
+ global choose_news
+ word_list = [c["name"] for c in choose_news]
+ pic = get_clouds(word_list)
+ return pic
+
+
+ def g_ci_yun_by_abstract():
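+                # Build a word-cloud image from the abstracts of the marked papers.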
+ global choose_news
+ word_list = [c["abstract"] for c in choose_news]
+ pic = get_clouds(word_list)
+ return pic
+
+
+ ci_yun_by_title.click(g_ci_yun_by_title, outputs=[result_place])
+ ci_yun_by_abstract.click(g_ci_yun_by_abstract, outputs=[result_place])
+
+ with gr.TabItem("会议论文查看"):
+ with gr.Row():
+ with gr.Column(scale=1):
+ with gr.Row():
+ # gr.Label("会议名称")
+ conf_name = gr.Dropdown(choices=["ECCV2022", "ECCV2020", "CVPR2024"], value="ECCV2022", label="会议名称", show_label=True)
+ conf_button = gr.Button("查看会议论文", variant='primary')
+ dataframe = gr.Dataframe(headers=["论文名称"], col_count=(1, "fixed"), type='array', height=800)
+ with gr.Row():
+ look_input = gr.Textbox(placeholder="关键词检索", label="关键词过滤")
+ filter_button = gr.Button("过滤")
+ # up_button = gr.Button("加载")
+
+ with gr.Column(scale=2):
+ pdf = PDF(label="Upload a PDF", interactive=True, height=1000)
+
+
+ # name = gr.Textbox(show_label=False)
+ # pdf.upload(lambda f: f, pdf, name)
+
+ def up_load():
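+                # Load a fixed sample PDF (only referenced by the commented-out up_button).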
+ global current_pdf_file
+ n = r"D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_2022\main_paper\3d-siamese-transformer-network-for-single-object-tracking-on-point-clouds_ECCV_2022.pdf"
+ current_pdf_file = n
+ return n
+
+
+            def load_conf_list(conf_name):
+                # List the downloaded papers of the selected conference as rows for the Dataframe.
+                if conf_name == "ECCV2022":
+                    root_dir = r"D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_2022\main_paper"
+                    return [[i] for i in os.listdir(root_dir)]
+                # Other conferences have not been downloaded yet; return an empty table instead of None.
+                return []
+
+
+ def look_dataframe(evt: gr.SelectData):
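+                # When a table row is clicked, resolve the file name to a full path and show it in the PDF viewer.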
+ global current_pdf_file
+ if evt.value:
+ root_dir = r"D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_2022\main_paper"
+ n = os.path.join(root_dir, evt.value)
+ if os.path.exists(n):
+ current_pdf_file = n
+ return current_pdf_file
+
+
+ def filter_by_word(words, paper_list):
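+                # Keep only the papers whose titles contain every whitespace-separated keyword.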
+ word_list = words.strip().split()
+ paper_list_filter = [p[0] for p in paper_list]
+ for word in word_list:
+ paper_list_filter = [p for p in paper_list_filter if word in p]
+ return [[p] for p in paper_list_filter]
+
+
+ filter_button.click(filter_by_word, inputs=[look_input, dataframe], outputs=[dataframe])
+ dataframe.select(look_dataframe, inputs=None, outputs=[pdf])
+ conf_button.click(load_conf_list, inputs=[conf_name], outputs=[dataframe])
+            # up_button.click(up_load, inputs=None, outputs=[pdf])
+
+if __name__ == '__main__':
+ demo.queue().launch(inbrowser=True, server_name='127.0.0.1', server_port=23223)
diff --git a/method3_dict.txt b/method3_dict.txt
new file mode 100644
index 0000000..d550e6a
--- /dev/null
+++ b/method3_dict.txt
@@ -0,0 +1,20 @@
+_trad_ 12.215113775321004
+task 11.808329224486352
+step 10.115128616689704
+thought 9.468294108747731
+performance 7.91112148935495
+agent 7.908585185590241
+demonstration 7.695334786087041
+retrieval 7.60209065815528
+method 7.186012901911181
+trajectory 6.258998528039508
+information 5.995282554194667
+_synapse_ 5.572552304074627
+relevant 5.527015778258248
+example 5.080665441372099
+reason 4.676097441406382
+_react_ 4.570513969848461
+baseline 4.479754027332443
+prompt 4.395961022082388
+achieve 4.296215825920176
+current 4.284028839203101
diff --git a/result.json b/result.json
new file mode 100644
index 0000000..91133c2
--- /dev/null
+++ b/result.json
@@ -0,0 +1 @@
+[{"name": "\u9488\u5bf9\u7535\u529bCPS\u6570\u636e\u9a71\u52a8\u7b97\u6cd5\u5bf9\u6297\u653b\u51fb\u7684\u9632\u5fa1\u65b9\u6cd5", "authors": ["\u6731\u536b\u5e731", "\u6c64\u59552", "\u9b4f\u5174\u614e3", "\u5218\u589e\u7a372"], "affiliations": ["1. \u56fd\u7f51\u6c5f\u82cf\u7701\u7535\u529b\u6709\u9650\u516c\u53f8", "2. \u4e1c\u5357\u5927\u5b66\u7535\u6c14\u5de5\u7a0b\u5b66\u9662", "3. \u5357\u745e\u96c6\u56e2\u6709\u9650\u516c\u53f8(\u56fd\u7f51\u7535\u529b\u79d1\u5b66\u7814\u7a76\u9662\u6709\u9650\u516c\u53f8)"], "abstract": "\u5927\u89c4\u6a21\u7535\u529b\u7535\u5b50\u8bbe\u5907\u7684\u63a5\u5165\u4e3a\u7cfb\u7edf\u5f15\u5165\u4e86\u6570\u91cf\u5e9e\u5927\u7684\u5f3a\u975e\u7ebf\u6027\u91cf\u6d4b/\u63a7\u5236\u8282\u70b9\uff0c\u4f7f\u5f97\u4f20\u7edf\u7535\u529b\u7cfb\u7edf\u9010\u6e10\u8f6c\u53d8\u4e3a\u7535\u529b\u4fe1\u606f\u7269\u7406\u7cfb\u7edf\uff08cyber-physical system\uff0c CPS\uff09\uff0c\u8bb8\u591a\u539f\u672c\u5e94\u7528\u6a21\u578b\u9a71\u52a8\u65b9\u6cd5\u89e3\u51b3\u7684\u7cfb\u7edf\u95ee\u9898\u4e0d\u5f97\u4e0d\u56e0\u7ef4\u5ea6\u707e\u96be\u7b49\u5c40\u9650\u8f6c\u800c\u91c7\u53d6\u6570\u636e\u9a71\u52a8\u7b97\u6cd5\u8fdb\u884c\u5206\u6790\u3002\u7136\u800c\uff0c\u6570\u636e\u9a71\u52a8\u7b97\u6cd5\u81ea\u8eab\u7684\u7f3a\u9677\u4e3a\u7cfb\u7edf\u7684\u5b89\u5168\u7a33\u5b9a\u8fd0\u884c\u5f15\u5165\u4e86\u65b0\u7684\u98ce\u9669\uff0c\u653b\u51fb\u8005\u53ef\u4ee5\u5bf9\u5176\u52a0\u4ee5\u5229\u7528\uff0c\u53d1\u8d77\u53ef\u80fd\u5f15\u53d1\u7cfb\u7edf\u505c\u7535\u751a\u81f3\u5931\u7a33\u7684\u5bf9\u6297\u653b\u51fb\u3002\u9488\u5bf9\u7535\u529bCPS\u4e2d\u6570\u636e\u9a71\u52a8\u7b97\u6cd5\u53ef\u80fd\u906d\u53d7\u7684\u5bf9\u6297\u653b\u51fb\uff0c\u4ece\u5f02\u5e38\u6570\u636e\u5254\u9664\u4e0e\u6062\u590d\u3001\u7b97\u6cd5\u6f0f\u6d1e\u6316\u6398\u4e0e\u4f18\u5316\u3001\u7b97\u6cd5\u81ea\u8eab\u53ef\u89e3\u91ca\u6027\u63d0\u53473\u4e2a\u65b9\u9762\uff0c\u63d0\u51fa\u4e86\u5bf9\u5e94\u7684\u9632\u5fa1\u65b9\u6cd5\uff1a\u5f02\u5e38\u6570\u636e\u8fc7\u6ee4\u5668\u3001\u57fa\u4e8eGAN\u7684\u6f0f\u6d1e\u6316\u6398\u4e0e\u4f18\u5316\u65b9\u6cd5\u3001\u6570\u636e-\u77e5\u8bc6\u878d\u5408\u6a21\u578b\u53ca\u5176\u8bad\u7ec3\u65b9\u6cd5\uff0c\u5e76\u7ecf\u7b97\u4f8b\u5206\u6790\u9a8c\u8bc1\u4e86\u6240\u63d0\u65b9\u6cd5\u7684\u6709\u6548\u6027\u3002"}, {"name": "\u878d\u5408\u98ce\u683c\u8fc1\u79fb\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5", "authors": ["\u4e8e\u632f\u534e", "\u6bb7\u6b63", "\u53f6\u9e25", "\u4e1b\u65ed\u4e9a"], "affiliations": ["\u897f\u5b89\u79d1\u6280\u5927\u5b66\u8ba1\u7b97\u673a\u79d1\u5b66\u4e0e\u6280\u672f\u5b66\u9662"], "abstract": 
"\u9488\u5bf9\u73b0\u6709\u9762\u5411\u76ee\u6807\u68c0\u6d4b\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5\u6cdb\u5316\u80fd\u529b\u5f31\u7684\u95ee\u9898\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u878d\u5408\u98ce\u683c\u8fc1\u79fb\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5\u3002\u9996\u5148\u63d0\u51fa\u4e00\u79cd\u65b0\u7684\u5bf9\u6297\u8865\u4e01\u751f\u6210\u65b9\u6cd5\uff0c\u4f7f\u7528\u98ce\u683c\u8fc1\u79fb\u65b9\u6cd5\u5c06\u98ce\u683c\u56fe\u50cf\u4e0d\u540c\u5c42\u6b21\u7279\u5f81\u63d0\u53d6\u5e76\u878d\u5408\uff0c\u751f\u6210\u65e0\u660e\u663e\u7269\u4f53\u7279\u5f81\u4e14\u7eb9\u7406\u4e30\u5bcc\u7684\u5bf9\u6297\u8865\u4e01\uff1b\u7136\u540e\u5229\u7528\u68af\u5ea6\u7c7b\u6fc0\u6d3b\u6620\u5c04\u65b9\u6cd5\u751f\u6210\u76ee\u6807\u7684\u7279\u5f81\u70ed\u56fe\uff0c\u5c06\u76ee\u6807\u4e0d\u540c\u533a\u57df\u5728\u76ee\u6807\u68c0\u6d4b\u6a21\u578b\u4e2d\u7684\u5173\u952e\u7a0b\u5ea6\u8fdb\u884c\u53ef\u89c6\u5316\u8868\u793a\uff1b\u6700\u540e\u6784\u5efa\u4e00\u79cd\u70ed\u56fe\u5f15\u5bfc\u673a\u5236\uff0c\u5f15\u5bfc\u5bf9\u6297\u8865\u4e01\u5728\u653b\u51fb\u76ee\u6807\u7684\u5173\u952e\u4f4d\u7f6e\u8fdb\u884c\u653b\u51fb\u4ee5\u63d0\u9ad8\u5176\u6cdb\u5316\u80fd\u529b\uff0c\u751f\u6210\u6700\u7ec8\u5bf9\u6297\u6837\u672c\u3002\u4e3a\u4e86\u9a8c\u8bc1\u6240\u63d0\u65b9\u6cd5\u7684\u6027\u80fd\uff0c\u5728DroNet\u5ba4\u5916\u6570\u636e\u96c6\u4e0a\u8fdb\u884c\u4e86\u5b9e\u9a8c\u3002\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u6240\u63d0\u65b9\u6cd5\u9488\u5bf9\u5355\u9636\u6bb5\u76ee\u6807\u68c0\u6d4b\u6a21\u578bYOLOv5\u751f\u6210\u7684\u5bf9\u6297\u6837\u672c\uff0c\u653b\u51fb\u6210\u529f\u7387\u53ef\u8fbe84.07%\uff0c\u5c06\u5176\u5e94\u7528\u4e8e\u653b\u51fb\u4e24\u9636\u6bb5\u76ee\u6807\u68c0\u6d4b\u6a21\u578bFaster R-CNN\u65f6\uff0c\u653b\u51fb\u6210\u529f\u7387\u4ecd\u4fdd\u6301\u572867.65%\u3002\u4e0e\u6240\u5bf9\u6bd4\u7684\u4e3b\u6d41\u65b9\u6cd5\u76f8\u6bd4\uff0c\u6240\u63d0\u65b9\u6cd5\u751f\u6210\u7684\u5bf9\u6297\u6837\u672c\u653b\u51fb\u6548\u679c\u8f83\u597d\uff0c\u800c\u4e14\u5177\u6709\u826f\u597d\u7684\u6cdb\u5316\u80fd\u529b\u3002"}, {"name": "\u57fa\u4e8eSE-AdvGAN\u7684\u56fe\u50cf\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5\u7814\u7a76", "authors": ["\u8d75\u5b8f", "\u5b8b\u99a5\u8363", "\u674e\u6587\u6539"], "affiliations": ["\u5170\u5dde\u7406\u5de5\u5927\u5b66\u8ba1\u7b97\u673a\u4e0e\u901a\u4fe1\u5b66\u9662"], "abstract": 
"\u5bf9\u6297\u6837\u672c\u662f\u8bc4\u4f30\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u9c81\u68d2\u6027\u548c\u63ed\u793a\u5176\u6f5c\u5728\u5b89\u5168\u9690\u60a3\u7684\u91cd\u8981\u624b\u6bb5\u3002\u57fa\u4e8e\u751f\u6210\u5bf9\u6297\u7f51\u7edc(GAN)\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5(AdvGAN)\u5728\u751f\u6210\u56fe\u50cf\u5bf9\u6297\u6837\u672c\u65b9\u9762\u53d6\u5f97\u663e\u8457\u8fdb\u5c55\uff0c\u4f46\u8be5\u65b9\u6cd5\u751f\u6210\u7684\u6270\u52a8\u7a00\u758f\u6027\u4e0d\u8db3\u4e14\u5e45\u5ea6\u8f83\u5927\uff0c\u5bfc\u81f4\u5bf9\u6297\u6837\u672c\u7684\u771f\u5b9e\u6027\u8f83\u4f4e\u3002\u4e3a\u89e3\u51b3\u8fd9\u4e00\u95ee\u9898\uff0c\u57fa\u4e8eAdvGAN\u63d0\u51fa\u4e00\u79cd\u6539\u8fdb\u7684\u56fe\u50cf\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5Squeeze-and-Excitation-AdvGAN(SE-AdvGAN)\u3002SE-AdvGAN\u901a\u8fc7\u6784\u9020SE\u6ce8\u610f\u529b\u751f\u6210\u5668\u548cSE\u6b8b\u5dee\u5224\u522b\u5668\u63d0\u9ad8\u6270\u52a8\u7684\u7a00\u758f\u6027\u3002SE\u6ce8\u610f\u529b\u751f\u6210\u5668\u7528\u4e8e\u63d0\u53d6\u56fe\u50cf\u5173\u952e\u7279\u5f81\u9650\u5236\u6270\u52a8\u751f\u6210\u4f4d\u7f6e\uff0cSE\u6b8b\u5dee\u5224\u522b\u5668\u6307\u5bfc\u751f\u6210\u5668\u907f\u514d\u751f\u6210\u65e0\u5173\u6270\u52a8\u3002\u540c\u65f6\uff0c\u5728SE\u6ce8\u610f\u529b\u751f\u6210\u5668\u7684\u635f\u5931\u51fd\u6570\u4e2d\u52a0\u5165\u4ee5\u25a0\u8303\u6570\u4e3a\u57fa\u51c6\u7684\u8fb9\u754c\u635f\u5931\u4ee5\u9650\u5236\u6270\u52a8\u7684\u5e45\u5ea6\uff0c\u4ece\u800c\u63d0\u9ad8\u5bf9\u6297\u6837\u672c\u7684\u771f\u5b9e\u6027\u3002\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u5728\u767d\u76d2\u653b\u51fb\u573a\u666f\u4e0b\uff0cSE-AdvGAN\u76f8\u8f83\u4e8e\u73b0\u6709\u65b9\u6cd5\u751f\u6210\u7684\u5bf9\u6297\u6837\u672c\u6270\u52a8\u7a00\u758f\u6027\u9ad8\u3001\u5e45\u5ea6\u5c0f\uff0c\u5e76\u4e14\u5728\u4e0d\u540c\u76ee\u6807\u6a21\u578b\u4e0a\u5747\u53d6\u5f97\u66f4\u597d\u7684\u653b\u51fb\u6548\u679c\uff0c\u8bf4\u660eSE-AdvGAN\u751f\u6210\u7684\u9ad8\u8d28\u91cf\u5bf9\u6297\u6837\u672c\u53ef\u4ee5\u66f4\u6709\u6548\u5730\u8bc4\u4f30\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\u7684\u9c81\u68d2\u6027\u3002"}, {"name": "\u9762\u5411\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u7684\u5f3a\u5316\u5b66\u4e60\u5f0f\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5", "authors": ["\u9648\u601d\u71361,2", "\u5434\u656c\u5f811,3", "\u51cc\u79651", "\u7f57\u5929\u60a61", "\u5218\u9553\u715c1,2", "\u6b66\u5ef6\u519b1,3"], "affiliations": ["1. \u4e2d\u56fd\u79d1\u5b66\u9662\u8f6f\u4ef6\u7814\u7a76\u6240\u667a\u80fd\u8f6f\u4ef6\u7814\u7a76\u4e2d\u5fc3", "2. \u4e2d\u56fd\u79d1\u5b66\u9662\u5927\u5b66", "3. 
\u8ba1\u7b97\u673a\u79d1\u5b66\u56fd\u5bb6\u91cd\u70b9\u5b9e\u9a8c\u5ba4(\u4e2d\u56fd\u79d1\u5b66\u9662\u8f6f\u4ef6\u7814\u7a76\u6240)"], "abstract": "\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u4ee3\u7801\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u56e0\u5176\u68c0\u6d4b\u6548\u7387\u9ad8\u548c\u7cbe\u5ea6\u51c6\u7684\u4f18\u52bf,\u9010\u6b65\u6210\u4e3a\u68c0\u6d4b\u8f6f\u4ef6\u6f0f\u6d1e\u7684\u91cd\u8981\u65b9\u6cd5,\u5e76\u5728\u4ee3\u7801\u6258\u7ba1\u5e73\u53f0Github\u7684\u4ee3\u7801\u5ba1\u8ba1\u670d\u52a1\u4e2d\u53d1\u6325\u91cd\u8981\u4f5c\u7528.\u7136\u800c,\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u5df2\u88ab\u8bc1\u660e\u5bb9\u6613\u53d7\u5230\u5bf9\u6297\u653b\u51fb\u7684\u5e72\u6270,\u8fd9\u5bfc\u81f4\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u5b58\u5728\u906d\u53d7\u653b\u51fb\u3001\u964d\u4f4e\u68c0\u6d4b\u51c6\u786e\u7387\u7684\u98ce\u9669.\u56e0\u6b64,\u6784\u5efa\u9488\u5bf9\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u7684\u5bf9\u6297\u653b\u51fb\u4e0d\u4ec5\u53ef\u4ee5\u53d1\u6398\u6b64\u7c7b\u6a21\u578b\u7684\u5b89\u5168\u7f3a\u9677,\u800c\u4e14\u6709\u52a9\u4e8e\u8bc4\u4f30\u6a21\u578b\u7684\u9c81\u68d2\u6027,\u8fdb\u800c\u901a\u8fc7\u76f8\u5e94\u7684\u65b9\u6cd5\u63d0\u5347\u6a21\u578b\u6027\u80fd.\u4f46\u73b0\u6709\u7684\u9762\u5411\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u7684\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u4f9d\u8d56\u4e8e\u901a\u7528\u7684\u4ee3\u7801\u8f6c\u6362\u5de5\u5177,\u5e76\u672a\u63d0\u51fa\u9488\u5bf9\u6027\u7684\u4ee3\u7801\u6270\u52a8\u64cd\u4f5c\u548c\u51b3\u7b56\u7b97\u6cd5,\u56e0\u6b64\u96be\u4ee5\u751f\u6210\u6709\u6548\u7684\u5bf9\u6297\u6837\u672c,\u4e14\u5bf9\u6297\u6837\u672c\u7684\u5408\u6cd5\u6027\u4f9d\u8d56\u4e8e\u4eba\u5de5\u68c0\u67e5.\u9488\u5bf9\u4e0a\u8ff0\u95ee\u9898,\u63d0\u51fa\u4e86\u4e00\u79cd\u9762\u5411\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u7684\u5f3a\u5316\u5b66\u4e60\u5f0f\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5.\u8be5\u65b9\u6cd5\u9996\u5148\u8bbe\u8ba1\u4e86\u4e00\u7cfb\u5217\u8bed\u4e49\u7ea6\u675f\u4e14\u6f0f\u6d1e\u4fdd\u7559\u7684\u4ee3\u7801\u6270\u52a8\u64cd\u4f5c\u4f5c\u4e3a\u6270\u52a8\u96c6\u5408;\u5176\u6b21,\u5c06\u5177\u5907\u6f0f\u6d1e\u7684\u4ee3\u7801\u6837\u672c\u4f5c\u4e3a\u8f93\u5165,\u5229\u7528\u5f3a\u5316\u5b66\u4e60\u6a21\u578b\u9009\u53d6\u5177\u4f53\u7684\u6270\u52a8\u64cd\u4f5c\u5e8f\u5217;\u6700\u540e,\u6839\u636e\u4ee3\u7801\u6837\u672c\u7684\u8bed\u6cd5\u6811\u8282\u70b9\u7c7b\u578b\u5bfb\u627e\u6270\u52a8\u7684\u6f5c\u5728\u4f4d\u7f6e,\u8fdb\u884c\u4ee3\u7801\u8f6c\u6362,\u4ece\u800c\u751f\u6210\u5bf9\u6297\u6837\u672c.\u57fa\u4e8eSARD\u548cNVD\u6784\u5efa\u4e86\u4e24\u4e2a\u5b9e\u9a8c\u6570\u636e\u96c6,\u517114278\u4e2a\u4ee3\u7801\u6837\u672c,\u5e76\u4ee5\u6b64\u8bad\u7ec3\u4e864\u4e2a\u5177\u5907\u4e0d\u540c\u7279\u70b9\u7684\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u4f5c\u4e3a\u653b\u51fb\u76ee\u6807.\u9488\u5bf9\u6bcf\u4e2a\u76ee\u6807\u6a21\u578b,\u8bad\u7ec3\u4e86\u4e00\u4e2a\u5f3a\u5316\u5b66\u4e60\u7f51\u7edc\u8fdb\u884c\u5bf9\u6297\u653b\u51fb.\u7ed3\u679c\u663e\u793a,\u8be5\u653b\u51fb\u65b9\u6cd5\u5bfc\u81f4\u6a21\u578b\u7684\u53ec\u56de\u7387\u964d\u4f4e\u4e8674.34%,\u653b\u51fb\u6210\u529f\u7387\u8fbe\u523096.71%,\u76f8\u8f83\u57fa\u7ebf\u65b9\u6cd5,\u653b\u51fb\u6210\u529f\u7387\u5e73\u5747\u63d0\u5347\u4e8668.76%.\u5b9e\u9a8c\u8bc1\u660e\u4e86\u5f53\u524d\u7684\u6f0f\u6d1e\u68c0\u6d4b\u6a21\u578b\u5b58\u5728\u88ab\u653b\u51fb\u7684\u98ce\u9669,\u9700\u8981\u8fdb\u4e00\u6b65\u7814\u7a76\u63d0\u5347\u6a21\u578b\u7684\u9c81\u68d2\u6027. "}]
\ No newline at end of file
diff --git a/result1.json b/result1.json
new file mode 100644
index 0000000..1d73ce5
--- /dev/null
+++ b/result1.json
@@ -0,0 +1,518 @@
+[
+ {
+ "name": "\u57fa\u4e8e\u6570\u636e\u589e\u5f3a\u548c\u6807\u7b7e\u566a\u58f0\u7684\u5feb\u901f\u5bf9\u6297\u8bad\u7ec3\u65b9\u6cd5",
+ "authors": [
+ "\u5b8b\u9038\u98de",
+ "\u67f3\u6bc5"
+ ],
+ "affiliations": [
+ "\u5e7f\u4e1c\u5de5\u4e1a\u5927\u5b66\u8ba1\u7b97\u673a\u5b66\u9662"
+ ],
+ "abstract": "\u5bf9\u6297\u8bad\u7ec3\u662f\u4fdd\u62a4\u5206\u7c7b\u6a21\u578b\u514d\u53d7\u5bf9\u6297\u6027\u653b\u51fb\u7684\u6709\u6548\u9632\u5fa1\u65b9\u6cd5\u3002\u7136\u800c\uff0c\u7531\u4e8e\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u751f\u6210\u5f3a\u5bf9\u6297\u6837\u672c\u7684\u9ad8\u6210\u672c\uff0c\u53ef\u80fd\u9700\u8981\u6570\u91cf\u7ea7\u7684\u989d\u5916\u8bad\u7ec3\u65f6\u95f4\u3002\u4e3a\u4e86\u514b\u670d\u8fd9\u4e00\u9650\u5236\uff0c\u57fa\u4e8e\u5355\u6b65\u653b\u51fb\u7684\u5feb\u901f\u5bf9\u6297\u8bad\u7ec3\u5df2\u88ab\u63a2\u7d22\u3002\u4ee5\u5f80\u7684\u5de5\u4f5c\u4ece\u6837\u672c\u521d\u59cb\u5316\u3001\u635f\u5931\u6b63\u5219\u5316\u548c\u8bad\u7ec3\u7b56\u7565\u7b49\u4e0d\u540c\u89d2\u5ea6\u5bf9\u5feb\u901f\u5bf9\u6297\u8bad\u7ec3\u8fdb\u884c\u4e86\u6539\u8fdb\u3002\u7136\u800c\uff0c\u5728\u5904\u7406\u5927\u6270\u52a8\u9884\u7b97\u65f6\u9047\u5230\u4e86\u707e\u96be\u6027\u8fc7\u62df\u5408\u3002\u57fa\u4e8e\u6570\u636e\u589e\u5f3a\u4e0e\u6807\u7b7e\u566a\u58f0\u7684\u5feb\u901f\u5bf9\u6297\u8bad\u7ec3\u65b9\u6cd5\u88ab\u63d0\u51fa\uff0c\u4ee5\u89e3\u51b3\u6b64\u56f0\u96be\u3002\u521d\u59cb\u9636\u6bb5\uff0c\u5bf9\u539f\u59cb\u6837\u672c\u6267\u884c\u591a\u79cd\u56fe\u50cf\u8f6c\u6362\uff0c\u5e76\u5f15\u5165\u968f\u673a\u566a\u58f0\u4ee5\u5b9e\u65bd\u6570\u636e\u589e\u5f3a\uff1b\u63a5\u7740\uff0c\u5c11\u91cf\u6807\u7b7e\u566a\u58f0\u88ab\u6ce8\u5165\uff1b\u7136\u540e\u4f7f\u7528\u589e\u5f3a\u7684\u6570\u636e\u751f\u6210\u5bf9\u6297\u6837\u672c\u7528\u4e8e\u6a21\u578b\u8bad\u7ec3\uff1b\u6700\u540e\uff0c\u6839\u636e\u5bf9\u6297\u9c81\u68d2\u6027\u6d4b\u8bd5\u7ed3\u679c\u81ea\u9002\u5e94\u5730\u8c03\u6574\u6807\u7b7e\u566a\u58f0\u7387\u3002\u5728CIFAR-10\u3001CIFAR-100\u6570\u636e\u96c6\u4e0a\u7684\u5168\u9762\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u76f8\u8f83\u4e8eFGSM-MEP\uff0c\u6240\u63d0\u65b9\u6cd5\u5728\u5927\u6270\u52a8\u9884\u7b97\u6761\u4ef6\u4e0b\uff0c\u5728\u4e24\u4e2a\u6570\u636e\u96c6\u4e0a\u7684AA\u4e0a\u5206\u522b\u63d0\u5347\u4e864.63\u548c5.38\u4e2a\u767e\u5206\u70b9\u3002\u7ecf\u5b9e\u9a8c\u8bc1\u660e\uff0c\u65b0\u63d0\u51fa\u7684\u65b9\u6848\u53ef\u4ee5\u6709\u6548\u5730\u5904\u7406\u5927\u7684\u6270\u52a8\u9884\u7b97\u4e0b\u707e\u96be\u6027\u8fc7\u62df\u5408\u95ee\u9898\uff0c\u5e76\u663e\u8457\u589e\u5f3a\u6a21\u578b\u7684\u5bf9\u6297\u9c81\u68d2\u6027\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u6761\u4ef6\u6269\u6563\u6a21\u578b\u7684\u56fe\u50cf\u5206\u7c7b\u5bf9\u6297\u6837\u672c\u9632\u5fa1\u65b9\u6cd5",
+ "authors": [
+ "\u9648\u5b50\u6c11",
+ "\u5173\u5fd7\u6d9b"
+ ],
+ "affiliations": [
+ "\u534e\u5317\u7535\u529b\u5927\u5b66\u63a7\u5236\u4e0e\u8ba1\u7b97\u673a\u5b66\u9662"
+ ],
+ "abstract": "\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u5728\u56fe\u50cf\u5206\u7c7b\u7b49\u9886\u57df\u53d6\u5f97\u4ee4\u4eba\u5370\u8c61\u6df1\u523b\u7684\u7ed3\u679c\uff0c\u4f46\u662f\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u5bb9\u6613\u53d7\u5230\u5bf9\u6297\u6837\u672c\u7684\u5e72\u6270\u5a01\u80c1\uff0c\u653b\u51fb\u8005\u901a\u8fc7\u5bf9\u6297\u6837\u672c\u5236\u4f5c\u7b97\u6cd5\uff0c\u7cbe\u5fc3\u8bbe\u8ba1\u5fae\u5c0f\u6270\u52a8\uff0c\u6784\u9020\u8089\u773c\u96be\u4ee5\u5206\u8fa8\u5374\u80fd\u5f15\u53d1\u6a21\u578b\u8bef\u5206\u7c7b\u7684\u5bf9\u6297\u6837\u672c\uff0c\u7ed9\u56fe\u50cf\u5206\u7c7b\u7b49\u6df1\u5ea6\u5b66\u4e60\u5e94\u7528\u5e26\u6765\u4e25\u91cd\u7684\u5b89\u5168\u9690\u60a3\u3002\u4e3a\u63d0\u5347\u56fe\u50cf\u5206\u7c7b\u6a21\u578b\u7684\u9c81\u68d2\u6027\uff0c\u672c\u6587\u5229\u7528\u6761\u4ef6\u6269\u6563\u6a21\u578b\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u7efc\u5408\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\u548c\u5bf9\u6297\u6837\u672c\u51c0\u5316\u7684\u5bf9\u6297\u6837\u672c\u9632\u5fa1\u65b9\u6cd5\u3002\u5728\u4e0d\u4fee\u6539\u76ee\u6807\u6a21\u578b\u7684\u57fa\u7840\u4e0a\uff0c\u68c0\u6d4b\u5e76\u51c0\u5316\u5bf9\u6297\u6837\u672c\uff0c\u63d0\u5347\u76ee\u6807\u6a21\u578b\u9c81\u68d2\u6027\u3002\u672c\u65b9\u6cd5\u5305\u62ec\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\u548c\u5bf9\u6297\u6837\u672c\u51c0\u5316\u4e24\u4e2a\u6a21\u5757\u3002\u5bf9\u4e8e\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\uff0c\u91c7\u7528\u4e0d\u4e00\u81f4\u6027\u589e\u5f3a\uff0c\u901a\u8fc7\u8bad\u7ec3\u4e00\u4e2a\u878d\u5165\u76ee\u6807\u6a21\u578b\u9ad8\u7ef4\u7279\u5f81\u548c\u56fe\u7247\u57fa\u672c\u7279\u5f81\u7684\u56fe\u50cf\u4fee\u590d\u6a21\u578b\uff0c\u6bd4\u8f83\u521d\u59cb\u8f93\u5165\u548c\u4fee\u590d\u7ed3\u679c\u7684\u4e0d\u4e00\u81f4\u6027\uff0c\u68c0\u6d4b\u5bf9\u6297\u6837\u672c\uff1b\u5bf9\u4e8e\u5bf9\u6297\u6837\u672c\u51c0\u5316\uff0c\u91c7\u7528\u7aef\u5230\u7aef\u7684\u5bf9\u6297\u6837\u672c\u51c0\u5316\u65b9\u5f0f\uff0c\u5728\u53bb\u566a\u6a21\u578b\u6267\u884c\u8fc7\u7a0b\u4e2d\u52a0\u5165\u56fe\u7247\u4f2a\u5f71\uff0c\u5b9e\u73b0\u5bf9\u6297\u6837\u672c\u51c0\u5316\u3002\u5728\u4fdd\u8bc1\u76ee\u6807\u6a21\u578b\u7cbe\u5ea6\u7684\u524d\u63d0\u4e0b\uff0c\u5728\u76ee\u6807\u6a21\u578b\u524d\u589e\u52a0\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\u548c\u51c0\u5316\u6a21\u5757\uff0c\u6839\u636e\u68c0\u6d4b\u7ed3\u679c\uff0c\u9009\u53d6\u76f8\u5e94\u7684\u51c0\u5316\u7b56\u7565\uff0c\u4ece\u800c\u6d88\u9664\u5bf9\u6297\u6837\u672c\uff0c\u63d0\u5347\u76ee\u6807\u6a21\u578b\u7684\u9c81\u68d2\u6027\u3002\u5728CIFAR10\u6570\u636e\u96c6\u548cCIFAR100\u6570\u636e\u96c6\u4e0a\u4e0e\u73b0\u6709\u65b9\u6cd5\u8fdb\u884c\u5bf9\u6bd4\u5b9e\u9a8c\u3002\u5bf9\u4e8e\u6270\u52a8\u8f83\u5c0f\u7684\u5bf9\u6297\u6837\u672c\uff0c\u672c\u65b9\u6cd5\u7684\u68c0\u6d4b\u7cbe\u5ea6\u6bd4Argos\u65b9\u6cd5\u63d0\u5347\u4e865-9\u4e2a\u767e\u5206\u70b9\uff1b\u76f8\u6bd4\u4e8eADP\u65b9\u6cd5\uff0c\u672c\u65b9\u6cd5\u5728\u9762\u5bf9\u4e0d\u540c\u79cd\u7c7b\u5bf9\u6297\u6837\u672c\u65f6\u9632\u5fa1\u6548\u679c\u66f4\u7a33\u5b9a\uff0c\u4e14\u5728BPDA\u653b\u51fb\u4e0b\uff0c\u672c\u65b9\u6cd5\u7684\u5bf9\u6297\u6837\u672c\u51c0\u5316\u6548\u679c\u8f83ADP\u63d0\u5347\u4e861.3%\u3002 "
+ },
+ {
+ "name": "\u57fa\u4e8e\u63a9\u6a21\u63d0\u53d6\u7684SAR\u56fe\u50cf\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5",
+ "authors": [
+ "\u7ae0\u575a\u6b661",
+ "\u80fd\u8c6a1",
+ "\u674e\u67701",
+ "\u94b1\u5efa\u534e2"
+ ],
+ "affiliations": [
+ "1. \u676d\u5dde\u7535\u5b50\u79d1\u6280\u5927\u5b66",
+ "2. \u4e2d\u56fd\u8054\u901a(\u6d59\u6c5f)\u4ea7\u4e1a\u4e92\u8054\u7f51\u6709\u9650\u516c\u53f8"
+ ],
+ "abstract": "SAR\uff08SyntheticApertureRadar\uff0c\u5408\u6210\u5b54\u5f84\u96f7\u8fbe\uff09\u56fe\u50cf\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u5728\u5f53\u524d\u5df2\u7ecf\u6709\u5f88\u591a\u65b9\u6cd5\uff0c\u4f46\u4ecd\u5b58\u5728\u7740\u5bf9\u6297\u6837\u672c\u6270\u52a8\u91cf\u8f83\u5927\u3001\u8bad\u7ec3\u4e0d\u7a33\u5b9a\u4ee5\u53ca\u5bf9\u6297\u6837\u672c\u7684\u8d28\u91cf\u65e0\u6cd5\u4fdd\u8bc1\u7b49\u95ee\u9898\u3002\u9488\u5bf9\u4e0a\u8ff0\u95ee\u9898\uff0c\u63d0\u51fa\u4e86\u4e00\u79cdSAR\u56fe\u50cf\u5bf9\u6297\u6837\u672c\u751f\u6210\u6a21\u578b\uff0c\u8be5\u6a21\u578b\u57fa\u4e8eAdvGAN\u6a21\u578b\u67b6\u6784\uff0c\u9996\u5148\u6839\u636eSAR\u56fe\u50cf\u7684\u7279\u70b9\u8bbe\u8ba1\u4e86\u4e00\u79cd\u7531\u589e\u5f3aLee\u6ee4\u6ce2\u5668\u548cOTSU\uff08\u6700\u5927\u7c7b\u95f4\u65b9\u5dee\u6cd5\uff09\u81ea\u9002\u5e94\u9608\u503c\u5206\u5272\u7b49\u6a21\u5757\u7ec4\u6210\u7684\u63a9\u6a21\u63d0\u53d6\u6a21\u5757\uff0c\u8fd9\u79cd\u65b9\u6cd5\u4ea7\u751f\u7684\u6270\u52a8\u91cf\u66f4\u5c0f\uff0c\u4e0e\u539f\u59cb\u6837\u672c\u7684SSIM\uff08Structural Similarity\uff0c\u7ed3\u6784\u76f8\u4f3c\u6027\uff09\u503c\u8fbe\u52300.997\u4ee5\u4e0a\u3002\u5176\u6b21\u5c06\u6539\u8fdb\u7684RaGAN\u635f\u5931\u5f15\u5165\u5230AdvGAN\u4e2d\uff0c\u4f7f\u7528\u76f8\u5bf9\u5747\u503c\u5224\u522b\u5668\uff0c\u8ba9\u5224\u522b\u5668\u5728\u8bad\u7ec3\u65f6\u540c\u65f6\u4f9d\u8d56\u4e8e\u771f\u5b9e\u6570\u636e\u548c\u751f\u6210\u7684\u6570\u636e\uff0c\u63d0\u9ad8\u4e86\u8bad\u7ec3\u7684\u7a33\u5b9a\u6027\u4e0e\u653b\u51fb\u6548\u679c\u3002\u5728MSTAR\u6570\u636e\u96c6\u4e0a\u4e0e\u76f8\u5173\u65b9\u6cd5\u8fdb\u884c\u4e86\u5b9e\u9a8c\u5bf9\u6bd4\uff0c\u5b9e\u9a8c\u8868\u660e\uff0c\u6b64\u65b9\u6cd5\u751f\u6210\u7684SAR\u56fe\u50cf\u5bf9\u6297\u6837\u672c\u5728\u653b\u51fb\u9632\u5fa1\u6a21\u578b\u65f6\u7684\u653b\u51fb\u6210\u529f\u7387\u8f83\u4f20\u7edf\u65b9\u6cd5\u63d0\u9ad8\u4e8610%\uff5e15%\u3002"
+ },
+ {
+ "name": "\u56fe\u795e\u7ecf\u7f51\u7edc\u5bf9\u6297\u653b\u51fb\u4e0e\u9c81\u68d2\u6027\u8bc4\u6d4b\u524d\u6cbf\u8fdb\u5c55",
+ "authors": [
+ "\u5434\u6d9b1,2,3",
+ "\u66f9\u65b0\u6c761,2",
+ "\u5148\u5174\u5e731,2,3",
+ "\u8881\u97161,2",
+ "\u5f20\u6b8a3",
+ "\u5d14\u707f\u4e00\u661f1,2",
+ "\u7530\u4f833"
+ ],
+ "affiliations": [
+ "1. \u91cd\u5e86\u90ae\u7535\u5927\u5b66\u7f51\u7edc\u7a7a\u95f4\u5b89\u5168\u4e0e\u4fe1\u606f\u6cd5\u5b66\u9662",
+ "2. \u91cd\u5e86\u5e02\u7f51\u7edc\u4e0e\u4fe1\u606f\u5b89\u5168\u6280\u672f\u5de5\u7a0b\u5b9e\u9a8c\u5ba4",
+ "3. \u91cd\u5e86\u90ae\u7535\u5927\u5b66-\u91cd\u5e86\u4e2d\u56fd\u4e09\u5ce1\u535a\u7269\u9986\u667a\u6167\u6587\u535a\u8054\u5408\u5b9e\u9a8c\u5ba4"
+ ],
+ "abstract": "\u8fd1\u5e74\u6765\uff0c\u56fe\u795e\u7ecf\u7f51\u7edc\uff08GNNs\uff09\u9010\u6e10\u6210\u4e3a\u4eba\u5de5\u667a\u80fd\u7684\u91cd\u8981\u7814\u7a76\u65b9\u5411\u3002\u7136\u800c\uff0cGNNs\u7684\u5bf9\u6297\u8106\u5f31\u6027\u4f7f\u5176\u5b9e\u9645\u5e94\u7528\u9762\u4e34\u4e25\u5cfb\u6311\u6218\u3002\u4e3a\u4e86\u5168\u9762\u8ba4\u8bc6GNNs\u5bf9\u6297\u653b\u51fb\u4e0e\u9c81\u68d2\u6027\u8bc4\u6d4b\u7684\u7814\u7a76\u5de5\u4f5c\uff0c\u5bf9\u76f8\u5173\u524d\u6cbf\u8fdb\u5c55\u8fdb\u884c\u68b3\u7406\u548c\u5206\u6790\u8ba8\u8bba\u3002\u9996\u5148\uff0c\u4ecb\u7ecdGNNs\u5bf9\u6297\u653b\u51fb\u7684\u7814\u7a76\u80cc\u666f\uff0c\u7ed9\u51faGNNs\u5bf9\u6297\u653b\u51fb\u7684\u5f62\u5f0f\u5316\u5b9a\u4e49\uff0c\u9610\u8ff0GNNs\u5bf9\u6297\u653b\u51fb\u53ca\u9c81\u68d2\u6027\u8bc4\u6d4b\u7684\u7814\u7a76\u6846\u67b6\u548c\u57fa\u672c\u6982\u5ff5\u3002\u7136\u540e\uff0c\u5bf9GNNs\u5bf9\u6297\u653b\u51fb\u9886\u57df\u6240\u63d0\u5177\u4f53\u65b9\u6cd5\u8fdb\u884c\u4e86\u603b\u7ed3\u548c\u68b3\u7406\uff0c\u5e76\u5bf9\u5176\u4e2d\u7684\u524d\u6cbf\u65b9\u6cd5\u4ece\u5bf9\u6297\u653b\u51fb\u7c7b\u578b\u548c\u653b\u51fb\u76ee\u6807\u8303\u56f4\u7684\u89d2\u5ea6\u8fdb\u884c\u8be6\u7ec6\u5206\u7c7b\u9610\u8ff0\uff0c\u5206\u6790\u4e86\u5b83\u4eec\u7684\u5de5\u4f5c\u673a\u5236\u3001\u539f\u7406\u548c\u4f18\u7f3a\u70b9\u3002\u5176\u6b21\uff0c\u8003\u8651\u5230\u57fa\u4e8e\u5bf9\u6297\u653b\u51fb\u7684\u6a21\u578b\u9c81\u68d2\u6027\u8bc4\u6d4b\u4f9d\u8d56\u4e8e\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u7684\u9009\u62e9\u548c\u5bf9\u6297\u6270\u52a8\u7a0b\u5ea6\uff0c\u53ea\u80fd\u5b9e\u73b0\u95f4\u63a5\u3001\u5c40\u90e8\u7684\u8bc4\u4ef7\uff0c\u96be\u4ee5\u5168\u9762\u53cd\u6620\u6a21\u578b\u9c81\u68d2\u6027\u7684\u672c\u8d28\u7279\u5f81\uff0c\u4ece\u800c\u7740\u91cd\u5bf9\u6a21\u578b\u9c81\u68d2\u6027\u7684\u76f4\u63a5\u8bc4\u6d4b\u6307\u6807\u8fdb\u884c\u4e86\u68b3\u7406\u548c\u5206\u6790\u3002\u5728\u6b64\u57fa\u7840\u4e0a\uff0c\u4e3a\u4e86\u652f\u6491GNNs\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u548c\u9c81\u68d2\u6027\u6a21\u578b\u7684\u8bbe\u8ba1\u4e0e\u8bc4\u4ef7\uff0c\u901a\u8fc7\u5b9e\u9a8c\u4ece\u6613\u5b9e\u73b0\u7a0b\u5ea6\u3001\u51c6\u786e\u6027\u3001\u6267\u884c\u65f6\u95f4\u7b49\u65b9\u9762\u5bf9\u4ee3\u8868\u6027\u7684GNNs\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u8fdb\u884c\u4e86\u5bf9\u6bd4\u5206\u6790\u3002\u6700\u540e\uff0c\u5bf9\u5b58\u5728\u7684\u6311\u6218\u548c\u672a\u6765\u7814\u7a76\u65b9\u5411\u8fdb\u884c\u5c55\u671b\u3002\u603b\u4f53\u800c\u8a00\uff0c\u76ee\u524dGNNs\u5bf9\u6297\u9c81\u68d2\u6027\u7814\u7a76\u4ee5\u53cd\u590d\u5b9e\u9a8c\u4e3a\u4e3b\u3001\u7f3a\u4e4f\u5177\u6709\u6307\u5bfc\u6027\u7684\u7406\u8bba\u6846\u67b6\u3002\u5982\u4f55\u4fdd\u969c\u57fa\u4e8eGNNs\u7684\u6df1\u5ea6\u667a\u80fd\u7cfb\u7edf\u7684\u53ef\u4fe1\u6027\uff0c\u4ecd\u9700\u8fdb\u4e00\u6b65\u7cfb\u7edf\u6027\u7684\u57fa\u7840\u7406\u8bba\u7814\u7a76\u3002 "
+ },
+ {
+ "name": "\u8bca\u65ad\u548c\u63d0\u9ad8\u8fc1\u79fb\u5b66\u4e60\u6a21\u578b\u9c81\u68d2\u6027\u7684\u53ef\u89c6\u5206\u6790\u65b9\u6cd5",
+ "authors": [
+ "\u5218\u771f",
+ "\u989c\u83c1",
+ "\u5434\u5146\u56fd",
+ "\u6797\u83f2",
+ "\u5434\u5411\u9633"
+ ],
+ "affiliations": [
+ "\u676d\u5dde\u7535\u5b50\u79d1\u6280\u5927\u5b66\u8ba1\u7b97\u673a\u5b66\u9662"
+ ],
+ "abstract": "\u867d\u7136\u8fc1\u79fb\u5b66\u4e60\u53ef\u4ee5\u4f7f\u5f00\u53d1\u4eba\u5458\u6839\u636e\u590d\u6742\u7684\u9884\u8bad\u7ec3\u6a21\u578b(\u6559\u5e08\u6a21\u578b)\u6784\u5efa\u7b26\u5408\u76ee\u6807\u4efb\u52a1\u7684\u81ea\u5b9a\u4e49\u6a21\u578b(\u5b66\u751f\u6a21\u578b)\uff0c \u4f46\u662f\u8fc1\u79fb\u5b66\u4e60\u4e2d\u7684\u5b66\u751f\u6a21\u578b\u53ef\u80fd\u4f1a\u7ee7\u627f\u6559\u5e08\u6a21\u578b\u4e2d\u7684\u7f3a\u9677\uff0c \u800c\u6a21\u578b\u9c81\u68d2\u6027\u662f\u4f5c\u4e3a\u8861\u91cf\u6a21\u578b\u7f3a\u9677\u7ee7\u627f\u7684\u91cd\u8981\u6307\u6807\u4e4b\u4e00. \u5728\u8fc1\u79fb\u5b66\u4e60\u9886\u57df\u4e2d\uff0c \u901a\u5e38\u4f1a\u8fd0\u7528\u7f3a\u9677\u7f13\u89e3\u6216\u5b66\u751f\u6a21\u578b\u548c\u6559\u5e08\u6a21\u578b\u8054\u5408\u8bad\u7ec3\u7684\u65b9\u6cd5\uff0c \u8fbe\u5230\u51cf\u5c11\u7ee7\u627f\u6559\u5e08\u6a21\u578b\u7684\u7f3a\u9677\u77e5\u8bc6\u76ee\u7684. \u56e0\u6b64\uff0c \u8bba\u6587\u63d0\u51fa\u4e00\u79cd\u7528\u4e8e\u63a2\u7d22\u8fc1\u79fb\u5b66\u4e60\u8fc7\u7a0b\u4e2d\u6a21\u578b\u9c81\u68d2\u6027\u53d8\u5316\u60c5\u51b5\u7684\u53ef\u89c6\u5206\u6790\u65b9\u6cd5\uff0c \u5e76\u6784\u5efa\u4e86\u76f8\u5e94\u7684\u539f\u578b\u7cfb\u7edf\u2014\u2014TLMRVis. \u8be5\u65b9\u6cd5\u9996\u5148\u8ba1\u7b97\u4e86\u5b66\u751f\u6a21\u578b\u7684\u9c81\u68d2\u6027\u80fd\u6307\u6807; \u5176\u6b21\u5728\u6570\u636e\u5b9e\u4f8b\u5c42\u9762\u5c55\u793a\u6a21\u578b\u5404\u7c7b\u522b\u7684\u8868\u73b0\u6027\u80fd; \u7136\u540e\u5728\u5b9e\u4f8b\u7279\u5f81\u5c42\u9762\u901a\u8fc7\u6a21\u578b\u62bd\u8c61\u5316\u65b9\u5f0f\u53bb\u63ed\u793a\u6559\u5e08\u6a21\u578b\u548c\u5b66\u751f\u6a21\u578b\u4e4b\u95f4\u7ee7\u627f\u7684\u91cd\u7528\u77e5\u8bc6; \u6700\u540e\u7ed3\u5408\u6a21\u578b\u5207\u7247\u65b9\u6cd5\u6539\u5584\u6a21\u578b\u7684\u7f3a\u9677\u7ee7\u627f\u7528\u4ee5\u63d0\u9ad8\u6a21\u578b\u9c81\u68d2\u6027. \u540c\u65f6\uff0c TLMRVis\u7cfb\u7edf\u4e0d\u4ec5\u7ed3\u5408\u591a\u79cd\u53ef\u89c6\u5316\u65b9\u6cd5\u5c55\u793a\u591a\u79cd\u5b66\u751f\u6a21\u578b\u548c\u6559\u5e08\u6a21\u578b\u4e4b\u95f4\u7684\u5f02\u540c\u70b9\uff0c \u800c\u4e14\u901a\u8fc7\u5f15\u5165\u7f3a\u9677\u7f13\u89e3\u6280\u672f\u6765\u67e5\u770b\u548c\u8bca\u65ad\u6559\u5e08\u6a21\u578b\u548c\u5b66\u751f\u6a21\u578b\u7684\u6027\u80fd\u53d8\u5316\u548c\u5e95\u5c42\u9884\u6d4b\u884c\u4e3a\u673a\u5236. 2\u4e2a\u6848\u4f8b\u7684\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c TLMRVis\u7cfb\u7edf\u53ef\u4ee5\u5e2e\u52a9\u7528\u6237\u5206\u6790\u8fc1\u79fb\u5b66\u4e60\u4e2d\u6a21\u578b\u7684\u9c81\u68d2\u6027\u3001\u6a21\u578b\u7ee7\u627f\u7684\u7f3a\u9677\u77e5\u8bc6\u548c\u6a21\u578b\u7f3a\u9677\u6539\u5584\u540e\u7684\u6027\u80fd\u53d8\u5316."
+ },
+ {
+ "name": "\u57fa\u4e8e\u8fd1\u7aef\u7ebf\u6027\u7ec4\u5408\u7684\u4fe1\u53f7\u8bc6\u522b\u795e\u7ecf\u7f51\u7edc\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5",
+ "authors": [
+ "\u90ed\u5b87\u7426",
+ "\u674e\u4e1c\u9633",
+ "\u95eb\u9554",
+ "\u738b\u6797\u5143"
+ ],
+ "affiliations": [
+ "\u6218\u7565\u652f\u63f4\u90e8\u961f\u4fe1\u606f\u5de5\u7a0b\u5927\u5b66\u6210\u50cf\u4e0e\u667a\u80fd\u5904\u7406\u5b9e\u9a8c\u5ba4"
+ ],
+ "abstract": "\u968f\u7740\u6df1\u5ea6\u5b66\u4e60\u5728\u65e0\u7ebf\u901a\u4fe1\u9886\u57df\u7279\u522b\u662f\u4fe1\u53f7\u8c03\u5236\u8bc6\u522b\u65b9\u5411\u7684\u5e7f\u6cdb\u5e94\u7528\uff0c\u795e\u7ecf\u7f51\u7edc\u6613\u53d7\u5bf9\u6297\u6837\u672c\u653b\u51fb\u7684\u95ee\u9898\u540c\u6837\u5f71\u54cd\u7740\u65e0\u7ebf\u901a\u4fe1\u7684\u5b89\u5168\u3002\u9488\u5bf9\u65e0\u7ebf\u4fe1\u53f7\u5728\u901a\u4fe1\u4e2d\u96be\u4ee5\u5b9e\u65f6\u83b7\u5f97\u795e\u7ecf\u7f51\u7edc\u53cd\u9988\u4e14\u53ea\u80fd\u8bbf\u95ee\u8bc6\u522b\u7ed3\u679c\u7684\u9ed1\u76d2\u653b\u51fb\u573a\u666f\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u57fa\u4e8e\u8fd1\u7aef\u7ebf\u6027\u7ec4\u5408\u7684\u9ed1\u76d2\u67e5\u8be2\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u3002\u8be5\u65b9\u6cd5\u9996\u5148\u5728\u6570\u636e\u96c6\u7684\u4e00\u4e2a\u5b50\u96c6\u4e0a\uff0c\u5bf9\u6bcf\u4e2a\u539f\u59cb\u4fe1\u53f7\u6837\u672c\u8fdb\u884c\u8fd1\u7aef\u7ebf\u6027\u7ec4\u5408\uff0c\u5373\u5728\u975e\u5e38\u9760\u8fd1\u539f\u59cb\u4fe1\u53f7\u7684\u8303\u56f4\u5185\u4e0e\u76ee\u6807\u4fe1\u53f7\u8fdb\u884c\u7ebf\u6027\u7ec4\u5408\uff08\u52a0\u6743\u7cfb\u6570\u4e0d\u5927\u4e8e0.05\uff09\uff0c\u5e76\u5c06\u5176\u8f93\u5165\u5f85\u653b\u51fb\u7f51\u7edc\u67e5\u8be2\u8bc6\u522b\u7ed3\u679c\u3002\u901a\u8fc7\u7edf\u8ba1\u7f51\u7edc\u5bf9\u5168\u90e8\u8fd1\u7aef\u7ebf\u6027\u7ec4\u5408\u8bc6\u522b\u51fa\u9519\u7684\u6570\u91cf\uff0c\u786e\u5b9a\u6bcf\u7c7b\u539f\u59cb\u4fe1\u53f7\u6700\u5bb9\u6613\u53d7\u5230\u7ebf\u6027\u7ec4\u5408\u5f71\u54cd\u7684\u7279\u5b9a\u76ee\u6807\u4fe1\u53f7\uff0c\u5c06\u5176\u79f0\u4e3a\u6700\u4f73\u6270\u52a8\u4fe1\u53f7\u3002\u5728\u653b\u51fb\u6d4b\u8bd5\u65f6\uff0c\u6839\u636e\u4fe1\u53f7\u7684\u7c7b\u522b\u9009\u62e9\u5bf9\u5e94\u6700\u4f73\u6270\u52a8\u4fe1\u53f7\u6267\u884c\u8fd1\u7aef\u7ebf\u6027\u7ec4\u5408\uff0c\u751f\u6210\u5bf9\u6297\u6837\u672c\u3002\u5b9e\u9a8c\u7ed3\u679c\u663e\u793a\uff0c\u8be5\u65b9\u6cd5\u5728\u9009\u5b9a\u5b50\u96c6\u4e0a\u9488\u5bf9\u6bcf\u79cd\u8c03\u5236\u7c7b\u522b\u7684\u6700\u4f73\u6270\u52a8\u4fe1\u53f7\uff0c\u6dfb\u52a0\u5728\u5168\u90e8\u6570\u636e\u96c6\u4e0a\u80fd\u5c06\u795e\u7ecf\u7f51\u7edc\u8bc6\u522b\u51c6\u786e\u7387\u4ece94%\u964d\u523050%\uff0c\u4e14\u76f8\u8f83\u4e8e\u6dfb\u52a0\u968f\u673a\u566a\u58f0\u653b\u51fb\u7684\u6270\u52a8\u529f\u7387\u66f4\u5c0f\u3002\u6b64\u5916\uff0c\u751f\u6210\u7684\u5bf9\u6297\u6837\u672c\u5bf9\u4e8e\u7ed3\u6784\u8fd1\u4f3c\u7684\u795e\u7ecf\u7f51\u7edc\u5177\u6709\u4e00\u5b9a\u8fc1\u79fb\u6027\u3002\u8fd9\u79cd\u65b9\u6cd5\u5728\u7edf\u8ba1\u67e5\u8be2\u540e\u751f\u6210\u65b0\u7684\u5bf9\u6297\u6837\u672c\u65f6\uff0c\u6613\u4e8e\u5b9e\u73b0\u4e14\u65e0\u9700\u518d\u8fdb\u884c\u9ed1\u76d2\u67e5\u8be2\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u751f\u6210\u5bf9\u6297\u7f51\u7edc\u7684\u6df1\u5ea6\u4f2a\u9020\u8de8\u6a21\u578b\u9632\u5fa1\u65b9\u6cd5",
+ "authors": [
+ "\u6234\u78ca",
+ "\u66f9\u6797",
+ "\u90ed\u4e9a\u7537",
+ "\u5f20\u5e06",
+ "\u675c\u5eb7\u5b81"
+ ],
+ "affiliations": [
+ "\u5317\u4eac\u4fe1\u606f\u79d1\u6280\u5927\u5b66\u4fe1\u606f\u4e0e\u901a\u4fe1\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u4e3a\u4e86\u964d\u4f4e\u6df1\u5ea6\u4f2a\u9020\uff08deepfake\uff09\u6280\u672f\u6ee5\u7528\u5e26\u6765\u7684\u793e\u4f1a\u98ce\u9669\uff0c\u63d0\u51fa\u4e00\u79cd\u57fa\u4e8e\u751f\u6210\u5bf9\u6297\u7f51\u7edc\u7684\u4e3b\u52a8\u9632\u5fa1\u6df1\u5ea6\u4f2a\u9020\u65b9\u6cd5\uff0c\u901a\u8fc7\u5728\u539f\u59cb\u56fe\u50cf\u4e0a\u589e\u52a0\u5fae\u5f31\u6270\u52a8\u5236\u4f5c\u5bf9\u6297\u6837\u672c\uff0c\u4f7f\u591a\u4e2a\u4f2a\u9020\u6a21\u578b\u8f93\u51fa\u4ea7\u751f\u660e\u663e\u5931\u771f\u3002\u63d0\u51fa\u7684\u6a21\u578b\u7531\u5bf9\u6297\u6837\u672c\u751f\u6210\u6a21\u5757\u548c\u5bf9\u6297\u6837\u672c\u4f18\u5316\u6a21\u5757\u7ec4\u6210\u3002\u5bf9\u6297\u6837\u672c\u751f\u6210\u6a21\u5757\u5305\u62ec\u751f\u6210\u5668\u548c\u9274\u522b\u5668\uff0c\u751f\u6210\u5668\u63a5\u6536\u539f\u59cb\u56fe\u50cf\u751f\u6210\u6270\u52a8\u540e\uff0c\u901a\u8fc7\u5bf9\u6297\u8bad\u7ec3\u7ea6\u675f\u6270\u52a8\u7684\u7a7a\u95f4\u5206\u5e03\uff0c\u964d\u4f4e\u6270\u52a8\u7684\u89c6\u89c9\u611f\u77e5\uff0c\u63d0\u9ad8\u5bf9\u6297\u6837\u672c\u7684\u771f\u5b9e\u6027\uff1b\u5bf9\u6297\u6837\u672c\u4f18\u5316\u6a21\u5757\u7531\u57fa\u7840\u5bf9\u6297\u6c34\u5370\u3001\u6df1\u5ea6\u4f2a\u9020\u6a21\u578b\u548c\u9274\u522b\u5668\u7b49\u7ec4\u6210\uff0c\u901a\u8fc7\u6a21\u62df\u9ed1\u76d2\u573a\u666f\u4e0b\u653b\u51fb\u591a\u4e2a\u6df1\u5ea6\u4f2a\u9020\u6a21\u578b\uff0c\u63d0\u9ad8\u5bf9\u6297\u6837\u672c\u7684\u653b\u51fb\u6027\u548c\u8fc1\u79fb\u6027\u3002\u5728\u5e38\u7528\u6df1\u5ea6\u4f2a\u9020\u6570\u636e\u96c6CelebA\u548cLFW\u8fdb\u884c\u4e86\u8bad\u7ec3\u548c\u6d4b\u8bd5\uff0c\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u76f8\u6bd4\u73b0\u6709\u4e3b\u52a8\u9632\u5fa1\u65b9\u6cd5\uff0c\u672c\u6587\u5728\u5b9e\u73b0\u8de8\u6a21\u578b\u4e3b\u52a8\u9632\u5fa1\u7684\u57fa\u7840\u4e0a\uff0c\u9632\u5fa1\u6210\u529f\u7387\u8fbe\u523085%\u4ee5\u4e0a\uff0c\u5e76\u4e14\u5bf9\u6297\u6837\u672c\u751f\u6210\u6548\u7387\u6bd4\u4f20\u7edf\u7b97\u6cd5\u63d0\u9ad820\uff5e30\u500d\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u5bf9\u6297\u6837\u672c\u653b\u51fb\u7814\u7a76",
+ "authors": [
+ "\u5f20\u5b66\u519b1",
+ "\u5e2d\u963f\u53cb1",
+ "\u52a0\u5c0f\u7ea21",
+ "\u5f20\u658c1",
+ "\u674e\u68851",
+ "\u675c\u6653\u521a2",
+ "\u9ec4\u6d77\u71d51"
+ ],
+ "affiliations": [
+ "1. \u5170\u5dde\u4ea4\u901a\u5927\u5b66\u7535\u5b50\u4e0e\u4fe1\u606f\u5de5\u7a0b\u5b66\u9662",
+ "2. \u9655\u897f\u79d1\u6280\u5927\u5b66\u7535\u5b50\u4fe1\u606f\u4e0e\u4eba\u5de5\u667a\u80fd\u5b66\u9662"
+ ],
+ "abstract": "\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u7cfb\u7edf\u56e0\u5176\u80fd\u591f\u6709\u6548\u62bd\u53d6RSS\u6307\u7eb9\u6570\u636e\u7684\u6df1\u5c42\u7279\u5f81\u800c\u5927\u5e45\u63d0\u9ad8\u4e86\u5ba4\u5185\u5b9a\u4f4d\u6027\u80fd\uff0c\u4f46\u8be5\u7c7b\u65b9\u6cd5\u9700\u8981\u5927\u91cf\u3001\u591a\u6837\u5316\u7684RSS\u6307\u7eb9\u6570\u636e\u8bad\u7ec3\u6a21\u578b\uff0c\u5e76\u4e14\u5bf9\u5176\u5b89\u5168\u6f0f\u6d1e\u4e5f\u7f3a\u4e4f\u5145\u5206\u7684\u7814\u7a76\uff0c\u8fd9\u4e9b\u5b89\u5168\u6f0f\u6d1e\u6e90\u4e8e\u65e0\u7ebfWi-Fi\u5a92\u4f53\u7684\u5f00\u653e\u6027\u548c\u5206\u7c7b\u5668\u7684\u56fa\u6709\u7f3a\u9677\uff08\u5982\u6613\u906d\u53d7\u5bf9\u6297\u6027\u653b\u51fb\u7b49\uff09\u3002\u4e3a\u6b64\uff0c\u5bf9\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684RSS\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u7cfb\u7edf\u7684\u5bf9\u6297\u6027\u653b\u51fb\u8fdb\u884c\u7814\u7a76\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u57fa\u4e8eWi-Fi\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u7684\u5bf9\u6297\u6837\u672c\u653b\u51fb\u6846\u67b6\uff0c\u5e76\u5229\u7528\u8be5\u6846\u67b6\u7814\u7a76\u4e86\u5bf9\u6297\u653b\u51fb\u5bf9\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684RSS\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u6a21\u578b\u6027\u80fd\u7684\u5f71\u54cd\u3002\u8be5\u6846\u67b6\u5305\u542b\u79bb\u7ebf\u8bad\u7ec3\u548c\u5728\u7ebf\u5b9a\u4f4d\u4e24\u4e2a\u9636\u6bb5\uff0c\u5728\u79bb\u7ebf\u8bad\u7ec3\u9636\u6bb5\uff0c\u8bbe\u8ba1\u9002\u7528\u4e8e\u589e\u5e7fWi-Fi RSS\u6307\u7eb9\u6570\u636e\u7684\u6761\u4ef6\u751f\u6210\u5bf9\u6297\u7f51\u7edc\uff08CGAN\uff09\u6765\u751f\u6210\u5927\u91cf\u3001\u591a\u6837\u5316\u7684RSS\u6307\u7eb9\u6570\u636e\u8bad\u7ec3\u9ad8\u9c81\u68d2\u7684\u5ba4\u5185\u5b9a\u4f4d\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\uff1b\u5728\u7ebf\u5b9a\u4f4d\u9636\u6bb5\uff0c\u6784\u9020\u6700\u5f3a\u7684\u4e00\u9636\u653b\u51fb\u7b56\u7565\u6765\u751f\u6210\u9488\u5bf9Wi-Fi RSS\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u7cfb\u7edf\u7684\u6709\u6548RSS\u5bf9\u6297\u6837\u672c\uff0c\u7814\u7a76\u5bf9\u6297\u653b\u51fb\u5bf9\u4e0d\u540c\u5ba4\u5185\u5b9a\u4f4d\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u6027\u80fd\u7684\u5f71\u54cd\u3002\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff1a\u5728\u516c\u5f00UJIIndoorLoc\u6570\u636e\u96c6\u4e0a\uff0c\u7531\u6240\u63d0\u6846\u67b6\u751f\u6210\u7684RSS\u6307\u7eb9\u5bf9\u6297\u6837\u672c\u5bf9\u73b0\u6709\u7684CNN\u3001DNN\u3001MLP\u3001pixeldp\uff3fCNN\u7684\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u6a21\u578b\u7684\u653b\u51fb\u6210\u529f\u7387\u5206\u522b\u8fbe\u523094.1%\uff0c63.75%\uff0c43.45%\uff0c72.5%\uff1b\u800c\u4e14\uff0c\u5bf9\u7531CGAN\u7f51\u7edc\u589e\u5e7f\u6570\u636e\u8bad\u7ec3\u7684\u4e0a\u8ff0\u56db\u79cd\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u6a21\u578b\u7684\u653b\u51fb\u6210\u529f\u7387\u4ecd\u5206\u522b\u8fbe\u5230\u4e8684.95%\uff0c44.8%\uff0c15.7%\uff0c11.5%\uff1b\u56e0\u6b64\uff0c\u73b0\u6709\u7684\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u6307\u7eb9\u5ba4\u5185\u5b9a\u4f4d\u6a21\u578b\u6613\u906d\u53d7\u5bf9\u6297\u6837\u672c\u653b\u51fb\u7684\u5f71\u54cd\uff0c\u7531\u771f\u5b9e\u6570\u636e\u548c\u589e\u5e7f\u6570\u636e\u6df7\u5408\u8bad\u7ec3\u7684\u5ba4\u5185\u5b9a\u4f4d\u6a21\u578b\u5728\u9762\u4e34\u5bf9\u6297\u6837\u672c\u653b\u51fb\u65f6\u5177\u6709\u66f4\u597d\u7684\u9c81\u68d2\u6027\u3002 "
+ },
+ {
+ "name": "\u57fa\u4e8eGAN\u7684\u65e0\u6570\u636e\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5",
+ "authors": [
+ "\u8d75\u6069\u6d69",
+ "\u51cc\u6377"
+ ],
+ "affiliations": [
+ "\u5e7f\u4e1c\u5de5\u4e1a\u5927\u5b66\u8ba1\u7b97\u673a\u5b66\u9662"
+ ],
+ "abstract": "\u5bf9\u6297\u6837\u672c\u80fd\u591f\u4f7f\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u4ee5\u9ad8\u7f6e\u4fe1\u5ea6\u8f93\u51fa\u9519\u8bef\u7684\u7ed3\u679c\u3002\u5728\u9ed1\u76d2\u653b\u51fb\u4e2d\uff0c\u73b0\u6709\u7684\u66ff\u4ee3\u6a21\u578b\u8bad\u7ec3\u65b9\u6cd5\u9700\u8981\u76ee\u6807\u6a21\u578b\u5168\u90e8\u6216\u90e8\u5206\u8bad\u7ec3\u6570\u636e\u624d\u80fd\u53d6\u5f97\u8f83\u597d\u7684\u653b\u51fb\u6548\u679c\uff0c\u4f46\u5b9e\u9645\u5e94\u7528\u4e2d\u76ee\u6807\u6a21\u578b\u7684\u8bad\u7ec3\u6570\u636e\u96be\u4ee5\u83b7\u53d6\u3002\u56e0\u6b64\uff0c\u6587\u4e2d\u63d0\u51fa\u4e00\u79cd\u57fa\u4e8eGAN\u7684\u65e0\u6570\u636e\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u3002\u65e0\u9700\u76ee\u6807\u6a21\u578b\u7684\u8bad\u7ec3\u6570\u636e\uff0c\u4f7f\u7528\u6df7\u5408\u6807\u7b7e\u4fe1\u606f\u7684\u566a\u58f0\u751f\u6210\u66ff\u4ee3\u6a21\u578b\u6240\u9700\u7684\u8bad\u7ec3\u6837\u672c\uff0c\u901a\u8fc7\u76ee\u6807\u6a21\u578b\u7684\u6807\u8bb0\u4fe1\u606f\u4ee5\u53ca\u591a\u6837\u5316\u635f\u5931\u51fd\u6570\u4f7f\u8bad\u7ec3\u6837\u672c\u5206\u5e03\u5747\u5300\u4e14\u5305\u542b\u66f4\u591a\u7279\u5f81\u4fe1\u606f\uff0c\u8fdb\u800c\u4f7f\u66ff\u4ee3\u6a21\u578b\u9ad8\u6548\u5b66\u4e60\u76ee\u6807\u6a21\u578b\u7684\u5206\u7c7b\u529f\u80fd\u3002\u5bf9\u6bd4DaST\u548cMAZE\uff0c\u6587\u4e2d\u65b9\u6cd5\u5728\u964d\u4f4e35%-60%\u7684\u5bf9\u6297\u6270\u52a8\u548c\u67e5\u8be2\u6b21\u6570\u7684\u540c\u65f6\u5bf9CIFAR-100\u3001CIFAR-10\u3001SVHN\u3001FMNIST\u3001MNIST\u4e94\u4e2a\u6570\u636e\u96c6\u7684FGSM\u3001BIM\u3001PGD\u4e09\u79cd\u653b\u51fb\u7684\u6210\u529f\u7387\u5e73\u5747\u63d0\u9ad86%-10%\u3002\u5e76\u4e14\u5728\u5b9e\u9645\u5e94\u7528\u4e2d\u7684\u9ed1\u76d2\u6a21\u578b\u573a\u666fMicrosoft Azure\u53d6\u5f9778%\u4ee5\u4e0a\u7684\u653b\u51fb\u6210\u529f\u7387\u3002"
+ },
+ {
+ "name": "\u9762\u5411\u9c81\u68d2\u56fe\u7ed3\u6784\u9632\u5fa1\u7684\u8fc7\u53c2\u6570\u5316\u56fe\u795e\u7ecf\u7f51\u7edc",
+ "authors": [
+ "\u521d\u65ed1",
+ "\u9a6c\u8f9b\u5b872,3",
+ "\u6797\u96332,3",
+ "\u738b\u946b1,4",
+ "\u738b\u4e9a\u6c993,5",
+ "\u6731\u6587\u6b661,4",
+ "\u6885\u5b8f3"
+ ],
+ "affiliations": [
+ "1. \u6e05\u534e\u5927\u5b66\u8ba1\u7b97\u673a\u79d1\u5b66\u4e0e\u6280\u672f\u7cfb",
+ "2. \u5317\u4eac\u5927\u5b66\u8ba1\u7b97\u673a\u5b66\u9662",
+ "3. \u9ad8\u53ef\u4fe1\u8f6f\u4ef6\u6280\u672f\u6559\u80b2\u90e8\u91cd\u70b9\u5b9e\u9a8c\u5ba4(\u5317\u4eac\u5927\u5b66)",
+ "4. \u6e05\u534e\u5927\u5b66\u5317\u4eac\u4fe1\u606f\u79d1\u5b66\u4e0e\u6280\u672f\u56fd\u5bb6\u7814\u7a76\u4e2d\u5fc3",
+ "5. \u5317\u4eac\u5927\u5b66\u8f6f\u4ef6\u5de5\u7a0b\u56fd\u5bb6\u5de5\u7a0b\u4e2d\u5fc3"
+ ],
+ "abstract": "\u56fe\u6570\u636e\u5728\u73b0\u5b9e\u5e94\u7528\u4e2d\u666e\u904d\u5b58\u5728,\u56fe\u795e\u7ecf\u7f51\u7edc(GNN)\u88ab\u5e7f\u6cdb\u5e94\u7528\u4e8e\u5206\u6790\u56fe\u6570\u636e,\u7136\u800cGNN\u7684\u6027\u80fd\u4f1a\u88ab\u56fe\u7ed3\u6784\u4e0a\u7684\u5bf9\u6297\u653b\u51fb\u5267\u70c8\u5f71\u54cd.\u5e94\u5bf9\u56fe\u7ed3\u6784\u4e0a\u7684\u5bf9\u6297\u653b\u51fb,\u73b0\u6709\u7684\u9632\u5fa1\u65b9\u6cd5\u4e00\u822c\u57fa\u4e8e\u56fe\u5185\u805a\u5148\u9a8c\u8fdb\u884c\u4f4e\u79e9\u56fe\u7ed3\u6784\u91cd\u6784.\u4f46\u662f\u73b0\u6709\u7684\u56fe\u7ed3\u6784\u5bf9\u6297\u9632\u5fa1\u65b9\u6cd5\u65e0\u6cd5\u81ea\u9002\u5e94\u79e9\u771f\u503c\u8fdb\u884c\u4f4e\u79e9\u56fe\u7ed3\u6784\u91cd\u6784,\u540c\u65f6\u4f4e\u79e9\u56fe\u7ed3\u6784\u4e0e\u4e0b\u6e38\u4efb\u52a1\u8bed\u4e49\u5b58\u5728\u9519\u914d.\u4e3a\u4e86\u89e3\u51b3\u4ee5\u4e0a\u95ee\u9898,\u57fa\u4e8e\u8fc7\u53c2\u6570\u5316\u7684\u9690\u5f0f\u6b63\u5219\u6548\u5e94\u63d0\u51fa\u8fc7\u53c2\u6570\u5316\u56fe\u795e\u7ecf\u7f51\u7edc(OPGNN)\u65b9\u6cd5,\u5e76\u5f62\u5f0f\u5316\u8bc1\u660e\u6240\u63d0\u65b9\u6cd5\u53ef\u4ee5\u81ea\u9002\u5e94\u6c42\u89e3\u4f4e\u79e9\u56fe\u7ed3\u6784,\u540c\u65f6\u8bc1\u660e\u8282\u70b9\u6df1\u5c42\u8868\u5f81\u4e0a\u7684\u8fc7\u53c2\u6570\u5316\u6b8b\u5dee\u94fe\u63a5\u53ef\u4ee5\u6709\u6548\u89e3\u51b3\u8bed\u4e49\u9519\u914d.\u5728\u771f\u5b9e\u6570\u636e\u96c6\u4e0a\u7684\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e, OPGNN\u65b9\u6cd5\u76f8\u5bf9\u4e8e\u73b0\u6709\u57fa\u7ebf\u65b9\u6cd5\u5177\u6709\u66f4\u597d\u7684\u9c81\u68d2\u6027,\u540c\u65f6, OPGNN\u65b9\u6cd5\u6846\u67b6\u5728\u4e0d\u540c\u7684\u56fe\u795e\u7ecf\u7f51\u7edc\u9aa8\u5e72\u4e0a\u5982GCN\u3001APPNP\u548cGPRGNN\u4e0a\u663e\u8457\u6709\u6548."
+ },
+ {
+ "name": "\u57fa\u4e8e\u751f\u6210\u5f0f\u81ea\u76d1\u7763\u5b66\u4e60\u7684\u5bf9\u6297\u6837\u672c\u5206\u7c7b\u7b97\u6cd5",
+ "authors": [
+ "\u9633\u5e061",
+ "\u9b4f\u5baa2,3",
+ "\u90ed\u6770\u9f992,3",
+ "\u90d1\u5efa\u6f332,3",
+ "\u5170\u6d772"
+ ],
+ "affiliations": [
+ "1. \u798f\u5dde\u5927\u5b66\u5148\u8fdb\u5236\u9020\u5b66\u9662",
+ "2. \u4e2d\u56fd\u79d1\u5b66\u9662\u798f\u5efa\u7269\u8d28\u7ed3\u6784\u7814\u7a76\u6240",
+ "3. \u4e2d\u56fd\u79d1\u5b66\u9662\u6d77\u897f\u7814\u7a76\u9662\u6cc9\u5dde\u88c5\u5907\u5236\u9020\u7814\u7a76\u4e2d\u5fc3"
+ ],
+ "abstract": "\u5bf9\u6297\u6837\u672c\u5e38\u5e38\u88ab\u89c6\u4e3a\u5bf9\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u9c81\u68d2\u6027\u7684\u5a01\u80c1\uff0c\u800c\u73b0\u6709\u5bf9\u6297\u8bad\u7ec3\u5f80\u5f80\u4f1a\u964d\u4f4e\u5206\u7c7b\u7f51\u7edc\u7684\u6cdb\u5316\u7cbe\u5ea6\uff0c\u5bfc\u81f4\u5176\u5bf9\u539f\u59cb\u6837\u672c\u7684\u5206\u7c7b\u6548\u679c\u964d\u4f4e\u3002\u56e0\u6b64\uff0c\u63d0\u51fa\u4e86\u4e00\u4e2a\u57fa\u4e8e\u751f\u6210\u5f0f\u81ea\u76d1\u7763\u5b66\u4e60\u7684\u5bf9\u6297\u6837\u672c\u5206\u7c7b\u7b97\u6cd5\uff0c\u901a\u8fc7\u81ea\u76d1\u7763\u5b66\u4e60\u8bad\u7ec3\u751f\u6210\u5f0f\u6a21\u578b\u83b7\u53d6\u56fe\u50cf\u6570\u636e\u6f5c\u5728\u7279\u5f81\u7684\u80fd\u529b\uff0c\u5e76\u57fa\u4e8e\u8be5\u6a21\u578b\u5b9e\u73b0\u5bf9\u6297\u6837\u672c\u7684\u7279\u5f81\u7b5b\u9009\uff0c\u800c\u540e\u5c06\u5176\u4e2d\u6709\u76ca\u5206\u7c7b\u7684\u4fe1\u606f\u53cd\u9988\u7ed9\u5206\u7c7b\u6a21\u578b\u3002\u6700\u540e\u8fdb\u884c\u8054\u5408\u5b66\u4e60\uff0c\u5b8c\u6210\u7aef\u5230\u7aef\u7684\u5168\u5c40\u8bad\u7ec3\uff0c\u8fdb\u4e00\u6b65\u5b9e\u73b0\u5206\u7c7b\u6a21\u578b\u6cdb\u5316\u7cbe\u5ea6\u7684\u63d0\u5347\u3002\u5728MNIST\u3001CIFAR10\u548cCIFAR100\u6570\u636e\u96c6\u4e0a\u7684\u5b9e\u9a8c\u7ed3\u679c\u663e\u793a\uff0c\u4e0e\u6807\u51c6\u8bad\u7ec3\u76f8\u6bd4\uff0c\u8be5\u7b97\u6cd5\u5c06\u5206\u7c7b\u7cbe\u5ea6\u5206\u522b\u63d0\u9ad8\u4e860.06%\u30011.34%\u30010.89%\uff0c\u8fbe\u523099.70%\u300184.34%\u300163.65%\u3002\u7ed3\u679c\u8bc1\u660e\uff0c\u8be5\u7b97\u6cd5\u514b\u670d\u4e86\u4f20\u7edf\u5bf9\u6297\u8bad\u7ec3\u964d\u4f4e\u6a21\u578b\u6cdb\u5316\u6027\u80fd\u7684\u56fa\u6709\u7f3a\u70b9\uff0c\u5e76\u8fdb\u4e00\u6b65\u63d0\u9ad8\u4e86\u5206\u7c7b\u7f51\u7edc\u7684\u7cbe\u5ea6\u3002"
+ },
+ {
+ "name": "\u65f6\u9891\u5206\u533a\u6270\u52a8\u5b9e\u73b0\u97f3\u9891\u5206\u7c7b\u5bf9\u6297\u6837\u672c\u751f\u6210",
+ "authors": [
+ "\u5f20\u96c4\u4f1f",
+ "\u5f20\u5f3a",
+ "\u6768\u5409\u658c",
+ "\u5b59\u8499",
+ "\u674e\u6bc5\u8c6a"
+ ],
+ "affiliations": [
+ "\u9646\u519b\u5de5\u7a0b\u5927\u5b66\u6307\u6325\u63a7\u5236\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u73b0\u6709\u65b9\u6cd5\u751f\u6210\u7684\u97f3\u9891\u5206\u7c7b\u5bf9\u6297\u6837\u672c(adversarial example, AE)\u653b\u51fb\u6210\u529f\u7387\u4f4e\uff0c\u6613\u88ab\u611f\u77e5\u3002\u9274\u4e8e\u6b64\uff0c\u8bbe\u8ba1\u4e86\u4e00\u79cd\u57fa\u4e8e\u65f6\u9891\u5206\u533a\u6270\u52a8(time-frequency partitioned perturbation, TFPP)\u7684\u97f3\u9891AE\u751f\u6210\u6846\u67b6\u3002\u97f3\u9891\u4fe1\u53f7\u7684\u5e45\u5ea6\u8c31\u6839\u636e\u65f6\u9891\u7279\u6027\u88ab\u5212\u5206\u4e3a\u5173\u952e\u548c\u975e\u5173\u952e\u533a\u57df\uff0c\u5e76\u751f\u6210\u76f8\u5e94\u7684\u5bf9\u6297\u6270\u52a8\u3002\u5728TFPP\u57fa\u7840\u4e0a\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u57fa\u4e8e\u751f\u6210\u5bf9\u6297\u7f51\u7edc(generative adversarial network, GAN)\u7684AE\u751f\u6210\u65b9\u6cd5TFPPGAN,\u4ee5\u5206\u533a\u5e45\u5ea6\u8c31\u4e3a\u8f93\u5165\uff0c\u901a\u8fc7\u5bf9\u6297\u8bad\u7ec3\u81ea\u9002\u5e94\u8c03\u6574\u6270\u52a8\u7ea6\u675f\u7cfb\u6570\uff0c\u540c\u65f6\u4f18\u5316\u5173\u952e\u548c\u975e\u5173\u952e\u533a\u57df\u7684\u6270\u52a8\u30023\u4e2a\u5178\u578b\u97f3\u9891\u5206\u7c7b\u6570\u636e\u96c6\u4e0a\u7684\u5b9e\u9a8c\u8868\u660e\uff0c\u4e0e\u57fa\u7ebf\u65b9\u6cd5\u76f8\u6bd4\uff0cTFPPGAN\u53ef\u5c06AE\u7684\u653b\u51fb\u6210\u529f\u7387\u3001\u4fe1\u566a\u6bd4\u5206\u522b\u63d0\u9ad84.7%\u548c5.5 dB,\u5c06\u751f\u6210\u7684\u8bed\u97f3\u5bf9\u6297\u6837\u672c\u7684\u8d28\u91cf\u611f\u77e5\u8bc4\u4ef7\u5f97\u5206\u63d0\u9ad80.15\u3002\u6b64\u5916\uff0c\u7406\u8bba\u5206\u6790\u4e86TFPP\u6846\u67b6\u4e0e\u5176\u4ed6\u653b\u51fb\u65b9\u6cd5\u76f8\u7ed3\u5408\u7684\u53ef\u884c\u6027\uff0c\u5e76\u901a\u8fc7\u5b9e\u9a8c\u9a8c\u8bc1\u4e86\u8fd9\u79cd\u7ed3\u5408\u7684\u6709\u6548\u6027\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u635f\u5931\u5e73\u6ed1\u7684\u5bf9\u6297\u6837\u672c\u653b\u51fb\u65b9\u6cd5",
+ "authors": [
+ "\u9ece\u59b9\u7ea21,2",
+ "\u91d1\u53cc1,2",
+ "\u675c\u66541,2"
+ ],
+ "affiliations": [
+ "1. \u5317\u4eac\u4ea4\u901a\u5927\u5b66\u667a\u80fd\u4ea4\u901a\u6570\u636e\u5b89\u5168\u4e0e\u9690\u79c1\u4fdd\u62a4\u6280\u672f\u5317\u4eac\u5e02\u91cd\u70b9\u5b9e\u9a8c\u5ba4",
+ "2. \u5317\u4eac\u4ea4\u901a\u5927\u5b66\u8ba1\u7b97\u673a\u4e0e\u4fe1\u606f\u6280\u672f\u5b66\u9662"
+ ],
+ "abstract": "\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc(DNNs)\u5bb9\u6613\u53d7\u5230\u5bf9\u6297\u6837\u672c\u7684\u653b\u51fb\uff0c\u73b0\u6709\u57fa\u4e8e\u52a8\u91cf\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u65b9\u6cd5\u867d\u7136\u53ef\u4ee5\u8fbe\u5230\u63a5\u8fd1100%\u7684\u767d\u76d2\u653b\u51fb\u6210\u529f\u7387\uff0c\u4f46\u662f\u5728\u653b\u51fb\u5176\u4ed6\u6a21\u578b\u65f6\u6548\u679c\u4ecd\u4e0d\u7406\u60f3\uff0c\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u8f83\u4f4e\u3002\u9488\u5bf9\u6b64\uff0c\u63d0\u51fa\u4e00\u79cd\u57fa\u4e8e\u635f\u5931\u5e73\u6ed1\u7684\u5bf9\u6297\u6837\u672c\u653b\u51fb\u65b9\u6cd5\u6765\u63d0\u9ad8\u5bf9\u6297\u6837\u672c\u7684\u53ef\u8fc1\u79fb\u6027\u3002\u5728\u6bcf\u4e00\u6b65\u8ba1\u7b97\u68af\u5ea6\u7684\u8fed\u4ee3\u8fc7\u7a0b\u4e2d\uff0c\u4e0d\u76f4\u63a5\u4f7f\u7528\u5f53\u524d\u68af\u5ea6\uff0c\u800c\u662f\u4f7f\u7528\u5c40\u90e8\u5e73\u5747\u68af\u5ea6\u6765\u7d2f\u79ef\u52a8\u91cf\uff0c\u4ee5\u6b64\u6765\u6291\u5236\u635f\u5931\u51fd\u6570\u66f2\u9762\u5b58\u5728\u7684\u5c40\u90e8\u632f\u8361\u73b0\u8c61\uff0c\u4ece\u800c\u7a33\u5b9a\u66f4\u65b0\u65b9\u5411\uff0c\u9003\u79bb\u5c40\u90e8\u6781\u503c\u70b9\u3002\u5728ImageNet\u6570\u636e\u96c6\u4e0a\u7684\u5927\u91cf\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff1a\u6240\u63d0\u65b9\u6cd5\u4e0e\u73b0\u6709\u57fa\u4e8e\u52a8\u91cf\u7684\u65b9\u6cd5\u76f8\u6bd4\uff0c\u5728\u5355\u4e2a\u6a21\u578b\u653b\u51fb\u5b9e\u9a8c\u4e2d\u7684\u5e73\u5747\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u5206\u522b\u63d0\u5347\u4e8638.07%\u548c27.77%\uff0c\u5728\u96c6\u6210\u6a21\u578b\u653b\u51fb\u5b9e\u9a8c\u4e2d\u7684\u5e73\u5747\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u5206\u522b\u63d0\u5347\u4e8632.50%\u548c28.63%\u3002"
+ },
+ {
+ "name": "\u56fe\u50cf\u5185\u5bb9\u7cbe\u7ec6\u5316\u611f\u77e5\u53ca\u5176\u5b89\u5168\u5173\u952e\u6280\u672f\u7814\u7a76",
+ "authors": [
+ "\u738b\u854a1,2",
+ "\u8346\u4e3d\u68661,2",
+ "\u90b9\u806a1,2",
+ "\u5415\u98de\u97041,2",
+ "\u6731\u5b50\u74871,2"
+ ],
+ "affiliations": [
+ "1. \u4e2d\u56fd\u79d1\u5b66\u9662\u4fe1\u606f\u5de5\u7a0b\u7814\u7a76\u6240",
+ "2. \u4e2d\u56fd\u79d1\u5b66\u9662\u5927\u5b66\u7f51\u7edc\u7a7a\u95f4\u5b89\u5168\u5b66\u9662"
+ ],
+ "abstract": "\u56fe\u50cf\u5185\u5bb9\u7cbe\u7ec6\u5316\u611f\u77e5\u662f\u8ba1\u7b97\u673a\u89c6\u89c9\u9886\u57df\u5185\u7684\u4e00\u4e2a\u57fa\u7840\u6027\u95ee\u9898,\u65e8\u5728\u5bf9\u56fe\u50cf\u4e2d\u5305\u542b\u7684\u4fe1\u606f\u8fdb\u884c\u7cbe\u7ec6\u5316\u7406\u89e3,\u5177\u6709\u91cd\u8981\u7684\u7814\u7a76\u4ef7\u503c\u548c\u5e7f\u9614\u7684\u5e94\u7528\u573a\u666f\u3002\u6839\u636e\u5173\u6ce8\u8303\u56f4\u7684\u4e0d\u540c,\u56fe\u50cf\u5185\u5bb9\u7cbe\u7ec6\u5316\u611f\u77e5\u4e3b\u8981\u5305\u62ec\u7ec6\u7c92\u5ea6\u8bc6\u522b\u3001\u573a\u666f\u56fe\u751f\u6210\u548c\u56fe\u50cf\u63cf\u8ff0\u7b49\u65b9\u9762\u3002\u672c\u6587\u9996\u5148\u5bf9\u5404\u5173\u952e\u6280\u672f\u7684\u7814\u7a76\u8fdb\u5c55\u548c\u73b0\u72b6\u8fdb\u884c\u7efc\u8ff0;\u7136\u540e\u8ba8\u8bba\u4e86\u76f4\u63a5\u5f71\u54cd\u611f\u77e5\u6a21\u578b\u9884\u6d4b\u7ed3\u679c\u7684\u5b89\u5168\u5a01\u80c1,\u6982\u8ff0\u4e86\u76f8\u5173\u653b\u51fb\u53ca\u9632\u5fa1\u6280\u672f\u7684\u7814\u7a76\u8fdb\u5c55;\u6700\u540e\u5bf9\u8be5\u9886\u57df\u7684\u672a\u6765\u53d1\u5c55\u8d8b\u52bf\u4f5c\u51fa\u5c55\u671b\u3002"
+ },
+ {
+ "name": "\u878d\u5408\u7f16\u7801\u53ca\u5bf9\u6297\u653b\u51fb\u7684\u5143\u8def\u5f84\u805a\u5408\u56fe\u795e\u7ecf\u7f51\u7edc",
+ "authors": [
+ "\u9648\u5b66\u521a1",
+ "\u59dc\u5f81\u548c2",
+ "\u674e\u4f73\u73893"
+ ],
+ "affiliations": [
+ "1. \u534e\u5317\u7535\u529b\u5927\u5b66\u6570\u7406\u5b66\u9662",
+ "2. \u667a\u8005\u56db\u6d77(\u5317\u4eac)\u6280\u672f\u6709\u9650\u516c\u53f8",
+ "3. \u534e\u5317\u7535\u529b\u5927\u5b66\u63a7\u5236\u4e0e\u8ba1\u7b97\u673a\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u5f02\u8d28\u4fe1\u606f\u7f51\u7edc\uff08HIN\uff09\u7531\u4e8e\u5305\u542b\u4e0d\u540c\u7c7b\u578b\u7684\u8282\u70b9\u548c\u8fb9\uff0c \u5728\u5b9e\u9645\u95ee\u9898\u4e2d\u5177\u6709\u5e7f\u6cdb\u7684\u5e94\u7528\u524d\u666f. HIN \u7684\u8868\u793a\u5b66\u4e60\u6a21\u578b\u65e8\u5728\u5bfb\u627e\u4e00\u79cd\u6709\u6548\u7684\u5efa\u6a21\u65b9\u6cd5\uff0c \u5c06 HIN \u4e2d\u7684\u8282\u70b9\u8868\u793a\u4e3a\u4f4e\u7ef4\u5411\u91cf\uff0c \u5e76\u5c3d\u53ef\u80fd\u5730\u4fdd\u7559\u7f51\u7edc\u4e2d\u7684\u5f02\u8d28\u4fe1\u606f. \u7136\u800c\uff0c \u73b0\u6709\u7684\u8868\u793a\u5b66\u4e60\u6a21\u578b\u4ecd\u5b58\u5728\u7740\u5bf9\u5f02\u8d28\u4fe1\u606f\u5229\u7528\u4e0d\u5145\u5206\u7684\u60c5\u51b5. \u4e3a\u89e3\u51b3\u8fd9\u4e00\u95ee\u9898\uff0c \u672c\u6587\u63d0\u51fa\u4e86\u4e00\u79cd\u878d\u5408\u7f16\u7801\u548c\u5bf9\u6297\u653b\u51fb\u7684\u5143\u8def\u5f84\u805a\u5408\u56fe\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\uff08FAMAGNN\uff09\uff0c \u8be5\u6a21\u578b\u7531\u4e09\u4e2a\u6a21\u5757\u6784\u6210\uff0c \u5206\u522b\u662f\u8282\u70b9\u5185\u5bb9\u8f6c\u6362\u3001\u5143\u8def\u5f84\u5185\u805a\u5408\u548c\u5143\u8def\u5f84\u95f4\u805a\u5408. \u8be5\u6a21\u578b\u65e8\u5728\u89e3\u51b3\u73b0\u6709 HIN \u8868\u793a\u5b66\u4e60\u65b9\u6cd5\u63d0\u53d6\u7279\u5f81\u4e0d\u5145\u5206\u7684\u95ee\u9898. \u540c\u65f6\uff0c FAMAGNN\u5f15\u5165\u4e86\u878d\u5408\u7684\u5143\u8def\u5f84\u5b9e\u4f8b\u7f16\u7801\u5668\uff0c \u4ee5\u63d0\u53d6 HIN \u4e2d\u4e30\u5bcc\u7684\u7ed3\u6784\u548c\u8bed\u4e49\u4fe1\u606f. \u6b64\u5916\uff0c \u6a21\u578b\u8fd8\u5f15\u5165\u4e86\u5bf9\u6297\u8bad\u7ec3\uff0c \u5728\u6a21\u578b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u8fdb\u884c\u5bf9\u6297\u653b\u51fb\uff0c \u4ee5\u63d0\u9ad8\u6a21\u578b\u7684\u9c81\u68d2\u6027. FAMAGNN \u5728\u8282\u70b9\u5206\u7c7b\u548c\u8282\u70b9\u805a\u7c7b\u7b49\u4e0b\u6e38\u4efb\u52a1\u4e2d\u7684\u4f18\u5f02\u8868\u73b0\u8bc1\u660e\u4e86\u5176\u6709\u6548\u6027."
+ },
+ {
+ "name": "\u9762\u5411\u7f51\u7edc\u5165\u4fb5\u68c0\u6d4b\u7684\u5bf9\u6297\u653b\u51fb\u7cfb\u7edf",
+ "authors": [
+ "\u6f58\u5b87\u6052",
+ "\u5ed6\u601d\u8d24",
+ "\u6768\u671d\u4fca",
+ "\u674e\u5b97\u548c",
+ "\u4e8e\u5a77\u5a77",
+ "\u5f20\u745e\u971e"
+ ],
+ "affiliations": [
+ "\u6842\u6797\u7535\u5b50\u79d1\u6280\u5927\u5b66"
+ ],
+ "abstract": "\u8be5\u9879\u76ee\u7814\u7a76\u591a\u79cd\u767d\u76d2\u653b\u51fb\u7b97\u6cd5\u751f\u6210\u767d\u76d2\u5bf9\u6297\u6837\u672c\u7684\u6548\u7387\uff0c\u540c\u65f6\u8fd0\u7528\u751f\u6210\u5bf9\u6297\u7f51\u7edc(GAN)\u6280\u672f\u6765\u751f\u6210\u9ed1\u76d2\u5bf9\u6297\u6837\u672c\uff0c\u5e76\u4e14\u901a\u8fc7\u6784\u5efa\u7f51\u7edc\u5165\u4fb5\u68c0\u6d4b\u6a21\u578b\u5305\u62ec\u8bef\u7528\u68c0\u6d4b\u548c\u5f02\u5e38\u68c0\u6d4b\u6a21\u578b\uff0c\u6765\u6d4b\u8bd5\u8fd9\u4e9b\u751f\u6210\u7684\u5bf9\u6297\u6837\u672c\u5728\u9762\u5bf9\u591a\u79cd\u4e0d\u540c\u5165\u4fb5\u68c0\u6d4b\u6a21\u578b\u65f6\u7684\u653b\u51fb\u7684\u6210\u529f\u7387,\u6700\u7ec8\u6784\u5efa\u4e00\u4e2a\u7f51\u7edc\u5165\u4fb5\u68c0\u6d4b\u7cfb\u7edf\u7684\u5bf9\u6297\u6837\u672c\u751f\u6210\u5668\uff08\u5305\u542b\u767d\u76d2\u548c\u9ed1\u76d2\u5bf9\u6297\u6837\u672c\uff09\u3002"
+ },
+ {
+ "name": "\u4eba\u8138\u6df1\u5ea6\u4f2a\u9020\u4e3b\u52a8\u9632\u5fa1\u6280\u672f\u7efc\u8ff0",
+ "authors": [
+ "\u77bf\u5de6\u73c9",
+ "\u6bb7\u742a\u6797",
+ "\u76db\u7d2b\u7426",
+ "\u5434\u4fca\u5f66",
+ "\u5f20\u535a\u6797",
+ "\u4f59\u5c1a\u620e",
+ "\u5362\u4f1f"
+ ],
+ "affiliations": [],
+ "abstract": "\u6df1\u5ea6\u751f\u6210\u6a21\u578b\u7684\u98de\u901f\u53d1\u5c55\u63a8\u52a8\u4e86\u4eba\u8138\u6df1\u5ea6\u4f2a\u9020\u6280\u672f\u7684\u8fdb\u6b65\uff0c\u4ee5Deepfake\u4e3a\u4ee3\u8868\u7684\u6df1\u5ea6\u4f2a\u9020\u6a21\u578b\u4e5f\u5f97\u5230\u4e86\u5341\u5206\u5e7f\u6cdb\u7684\u5e94\u7528\u3002\u6df1\u5ea6\u4f2a\u9020\u6280\u672f\u53ef\u4ee5\u5bf9\u4eba\u8138\u56fe\u50cf\u6216\u89c6\u9891\u8fdb\u884c\u6709\u76ee\u7684\u7684\u64cd\u7eb5\uff0c\u4e00\u65b9\u9762\uff0c\u8fd9\u79cd\u6280\u672f\u5e7f\u6cdb\u5e94\u7528\u4e8e\u7535\u5f71\u7279\u6548\u3001\u5a31\u4e50\u573a\u666f\u4e2d\uff0c\u4e30\u5bcc\u4e86\u4eba\u4eec\u7684\u5a31\u4e50\u751f\u6d3b\uff0c\u4fc3\u8fdb\u4e86\u4e92\u8054\u7f51\u591a\u5a92\u4f53\u7684\u4f20\u64ad\uff1b\u53e6\u4e00\u65b9\u9762\uff0c\u6df1\u5ea6\u4f2a\u9020\u4e5f\u5e94\u7528\u4e8e\u4e00\u4e9b\u53ef\u80fd\u9020\u6210\u4e0d\u826f\u5f71\u54cd\u7684\u573a\u666f\uff0c\u7ed9\u516c\u6c11\u7684\u540d\u8a89\u6743\u3001\u8096\u50cf\u6743\u9020\u6210\u4e86\u5371\u5bb3\uff0c\u540c\u65f6\u4e5f\u7ed9\u56fd\u5bb6\u5b89\u5168\u548c\u793e\u4f1a\u7a33\u5b9a\u5e26\u6765\u4e86\u6781\u5927\u7684\u5a01\u80c1\uff0c\u56e0\u6b64\u5bf9\u6df1\u5ea6\u4f2a\u9020\u9632\u5fa1\u6280\u672f\u7684\u7814\u7a76\u65e5\u76ca\u8feb\u5207\u3002\u73b0\u6709\u7684\u9632\u5fa1\u6280\u672f\u4e3b\u8981\u5206\u4e3a\u88ab\u52a8\u68c0\u6d4b\u548c\u4e3b\u52a8\u9632\u5fa1\uff0c\u800c\u88ab\u52a8\u68c0\u6d4b\u7684\u65b9\u5f0f\u65e0\u6cd5\u6d88\u9664\u4f2a\u9020\u4eba\u8138\u5728\u5e7f\u6cdb\u4f20\u64ad\u4e2d\u9020\u6210\u7684\u5f71\u54cd\uff0c\u96be\u4ee5\u505a\u5230\u201c\u4e8b\u524d\u9632\u5fa1\u201d\uff0c\u56e0\u6b64\u4e3b\u52a8\u9632\u5fa1\u7684\u601d\u60f3\u5f97\u5230\u4e86\u7814\u7a76\u4eba\u5458\u7684\u5e7f\u6cdb\u5173\u6ce8\u3002\u7136\u800c\uff0c\u76ee\u524d\u5b66\u672f\u754c\u6709\u5173\u6df1\u5ea6\u4f2a\u9020\u9632\u5fa1\u7684\u7efc\u8ff0\u4e3b\u8981\u5173\u6ce8\u57fa\u4e8e\u68c0\u6d4b\u7684\u88ab\u52a8\u5f0f\u9632\u5fa1\u65b9\u6cd5\uff0c\u51e0\u4e4e\u6ca1\u6709\u4ee5\u6df1\u5ea6\u4f2a\u9020\u4e3b\u52a8\u9632\u5fa1\u6280\u672f\u4e3a\u91cd\u70b9\u7684\u7efc\u8ff0\u3002\u57fa\u4e8e\u6b64\uff0c\u672c\u6587\u5bf9\u5f53\u524d\u5b66\u672f\u754c\u63d0\u51fa\u7684\u4eba\u8138\u6df1\u5ea6\u4f2a\u9020\u4e3b\u52a8\u9632\u5fa1\u6280\u672f\u8fdb\u884c\u68b3\u7406\u3001\u603b\u7ed3\u548c\u8ba8\u8bba\u3002\u9996\u5148\u9610\u8ff0\u4e86\u6df1\u5ea6\u4f2a\u9020\u4e3b\u52a8\u9632\u5fa1\u7684\u63d0\u51fa\u80cc\u666f\u548c\u4e3b\u8981\u601d\u60f3\uff0c\u5e76\u5bf9\u73b0\u6709\u7684\u4eba\u8138\u6df1\u5ea6\u4f2a\u9020\u4e3b\u52a8\u9632\u5fa1\u7b97\u6cd5\u8fdb\u884c\u6c47\u603b\u548c\u5f52\u7c7b\uff0c\u7136\u540e\u5bf9\u5404\u7c7b\u4e3b\u52a8\u9632\u5fa1\u7b97\u6cd5\u7684\u6280\u672f\u539f\u7406\u3001\u6027\u80fd\u3001\u4f18\u7f3a\u70b9\u7b49\u8fdb\u884c\u4e86\u7cfb\u7edf\u6027\u7684\u603b\u7ed3\uff0c\u540c\u65f6\u4ecb\u7ecd\u4e86\u7814\u7a76\u5e38\u7528\u7684\u6570\u636e\u96c6\u548c\u8bc4\u4f30\u65b9\u6cd5\uff0c\u6700\u540e\u5bf9\u6df1\u5ea6\u4f2a\u9020\u4e3b\u52a8\u9632\u5fa1\u6240\u9762\u4e34\u7684\u6280\u672f\u6311\u6218\u8fdb\u884c\u4e86\u5206\u6790\uff0c\u5bf9\u5176\u672a\u6765\u7684\u53d1\u5c55\u65b9\u5411\u5c55\u5f00\u4e86\u601d\u8003\u548c\u8ba8\u8bba\u3002 "
+ },
+ {
+ "name": "\u57fa\u4e8e\u5c40\u90e8\u6270\u52a8\u7684\u65f6\u95f4\u5e8f\u5217\u9884\u6d4b\u5bf9\u6297\u653b\u51fb",
+ "authors": [
+ "\u5f20\u8000\u51431,2",
+ "\u539f\u7ee7\u4e1c1,2",
+ "\u5218\u6d77\u6d0b2",
+ "\u738b\u5fd7\u6d772",
+ "\u8d75\u57f9\u7fd42"
+ ],
+ "affiliations": [
+ "1. \u4ea4\u901a\u5927\u6570\u636e\u4e0e\u4eba\u5de5\u667a\u80fd\u6559\u80b2\u90e8\u91cd\u70b9\u5b9e\u9a8c\u5ba4(\u5317\u4eac\u4ea4\u901a\u5927\u5b66)",
+ "2. \u5317\u4eac\u4ea4\u901a\u5927\u5b66\u8ba1\u7b97\u673a\u4e0e\u4fe1\u606f\u6280\u672f\u5b66\u9662"
+ ],
+ "abstract": "\u65f6\u95f4\u5e8f\u5217\u9884\u6d4b\u6a21\u578b\u5df2\u5e7f\u6cdb\u5e94\u7528\u4e8e\u65e5\u5e38\u751f\u6d3b\u4e2d\u7684\u5404\u4e2a\u884c\u4e1a,\u9488\u5bf9\u8fd9\u4e9b\u9884\u6d4b\u6a21\u578b\u7684\u5bf9\u6297\u653b\u51fb\u5173\u7cfb\u5230\u5404\u884c\u4e1a\u6570\u636e\u7684\u5b89\u5168\u6027.\u76ee\u524d,\u65f6\u95f4\u5e8f\u5217\u7684\u5bf9\u6297\u653b\u51fb\u591a\u5728\u5168\u5c40\u8303\u56f4\u5185\u8fdb\u884c\u5927\u89c4\u6a21\u6270\u52a8,\u5bfc\u81f4\u5bf9\u6297\u6837\u672c\u6613\u88ab\u611f\u77e5.\u540c\u65f6,\u5bf9\u6297\u653b\u51fb\u7684\u6548\u679c\u4f1a\u968f\u7740\u6270\u52a8\u5e45\u5ea6\u7684\u964d\u4f4e\u800c\u660e\u663e\u4e0b\u964d.\u56e0\u6b64,\u5982\u4f55\u5728\u751f\u6210\u4e0d\u6613\u5bdf\u89c9\u7684\u5bf9\u6297\u6837\u672c\u7684\u540c\u65f6\u4fdd\u6301\u8f83\u597d\u7684\u653b\u51fb\u6548\u679c,\u662f\u5f53\u524d\u65f6\u95f4\u5e8f\u5217\u9884\u6d4b\u5bf9\u6297\u653b\u51fb\u9886\u57df\u4e9f\u9700\u89e3\u51b3\u7684\u95ee\u9898\u4e4b\u4e00.\u9996\u5148\u63d0\u51fa\u4e00\u79cd\u57fa\u4e8e\u6ed1\u52a8\u7a97\u53e3\u7684\u5c40\u90e8\u6270\u52a8\u7b56\u7565,\u7f29\u5c0f\u5bf9\u6297\u6837\u672c\u7684\u6270\u52a8\u533a\u95f4;\u5176\u6b21,\u4f7f\u7528\u5dee\u5206\u8fdb\u5316\u7b97\u6cd5\u5bfb\u627e\u6700\u4f18\u653b\u51fb\u70b9\u4f4d,\u5e76\u7ed3\u5408\u5206\u6bb5\u51fd\u6570\u5206\u5272\u6270\u52a8\u533a\u95f4,\u8fdb\u4e00\u6b65\u964d\u4f4e\u6270\u52a8\u8303\u56f4,\u5b8c\u6210\u534a\u767d\u76d2\u653b\u51fb.\u548c\u5df2\u6709\u7684\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u5728\u591a\u4e2a\u4e0d\u540c\u6df1\u5ea6\u6a21\u578b\u4e0a\u7684\u5bf9\u6bd4\u5b9e\u9a8c\u8868\u660e,\u6240\u63d0\u51fa\u7684\u65b9\u6cd5\u80fd\u591f\u751f\u6210\u4e0d\u6613\u611f\u77e5\u7684\u5bf9\u6297\u6837\u672c,\u5e76\u6709\u6548\u6539\u53d8\u6a21\u578b\u7684\u9884\u6d4b\u8d8b\u52bf,\u5728\u80a1\u7968\u4ea4\u6613\u3001\u7535\u529b\u6d88\u8017\u3001\u592a\u9633\u9ed1\u5b50\u89c2\u6d4b\u548c\u6c14\u6e29\u9884\u6d4b\u8fd94\u4e2a\u5177\u6709\u6311\u6218\u6027\u7684\u4efb\u52a1\u4e2d\u5747\u53d6\u5f97\u4e86\u8f83\u597d\u7684\u653b\u51fb\u6548\u679c."
+ },
+ {
+ "name": "\u6587\u672c\u5bf9\u6297\u653b\u9632\u6280\u672f\u5728\u7535\u4fe1\u7f51\u7edc\u8bc8\u9a97\u9632\u63a7\u9886\u57df\u7684\u5e94\u7528\u63a2\u6790",
+ "authors": [
+ "\u6c64\u535a\u6587"
+ ],
+ "affiliations": [
+ "\u4e2d\u56fd\u4eba\u6c11\u8b66\u5bdf\u5927\u5b66(\u5eca\u574a)"
+ ],
+ "abstract": "\u968f\u7740\u81ea\u7136\u8bed\u8a00\u5904\u7406\u6a21\u578b\u8fd1\u671f\u5728\u4eba\u5de5\u667a\u80fd\u9886\u57df\u7684\u201c\u51fa\u5708\u201d\uff0c\u6838\u5fc3\u6a21\u578b\u6d89\u53ca\u7684\u5bf9\u6297\u653b\u9632\u6280\u672f\u7684\u53d1\u5c55\uff0c\u9010\u6e10\u6210\u4e3a\u4e00\u628a\u201c\u53cc\u5203\u5251\u201d\uff0c\u7535\u4fe1\u7f51\u7edc\u8bc8\u9a97\u4e0e\u9632\u63a7\u9886\u57df\u53cc\u65b9\u7684\u535a\u5f08\u5bf9\u6297\u6210\u4e3a\u7814\u7a76\u70ed\u70b9\u95ee\u9898\u3002\u901a\u8fc7\u5bf9\u4e0d\u540c\u8bc8\u9a97\u7c7b\u578b\u8fdb\u884c\u5206\u6790\uff0c\u7ed3\u5408\u7535\u4fe1\u7f51\u7edc\u8bc8\u9a97\u5168\u94fe\u6761\u4e0e\u73b0\u6709\u9632\u63a7\u6a21\u578b\u7684\u95ee\u9898\uff0c\u6df1\u5165\u6316\u6398\u53cd\u8bc8\u5e73\u53f0\u6838\u5fc3\u6280\u672f\uff0c\u8bbe\u8ba1\u4e86\u9488\u5bf9\u53cd\u8bc8\u68c0\u6d4b\u6a21\u578b\u7684\u6a21\u62df\u6587\u672c\u5bf9\u6297\u653b\u51fb\uff0c\u63a2\u6790\u6587\u672c\u5bf9\u6297\u653b\u9632\u6280\u672f\u5728\u7535\u4fe1\u7f51\u7edc\u8bc8\u9a97\u9632\u63a7\u9886\u57df\u7684\u5e94\u7528\uff0c\u5e76\u4e14\u8ba8\u8bba\u5176\u9762\u4e34\u7684\u6311\u6218\u4e0e\u524d\u666f\u3002"
+ },
+ {
+ "name": "\u4e00\u79cd\u968f\u673a\u675f\u641c\u7d22\u6587\u672c\u653b\u51fb\u9ed1\u76d2\u7b97\u6cd5",
+ "authors": [
+ "\u738b\u5c0f\u840c",
+ "\u5f20\u534e",
+ "\u4e01\u91d1\u6263",
+ "\u738b\u7a3c\u6167"
+ ],
+ "affiliations": [
+ "\u5317\u4eac\u90ae\u7535\u5927\u5b66\u7f51\u7edc\u4e0e\u4ea4\u6362\u6280\u672f\u56fd\u5bb6\u91cd\u70b9\u5b9e\u9a8c\u5ba4"
+ ],
+ "abstract": "\u9488\u5bf9\u73b0\u6709\u7684\u5bf9\u6297\u6587\u672c\u751f\u6210\u7b97\u6cd5\u4e2d\u6613\u9677\u5165\u5c40\u90e8\u6700\u4f18\u89e3\u7684\u95ee\u9898\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u5229\u7528\u675f\u641c\u7d22\u548c\u968f\u673a\u5143\u6765\u63d0\u9ad8\u653b\u51fb\u6210\u529f\u7387\u7684R-attack\u7b97\u6cd5\u3002\u9996\u5148\u901a\u8fc7\u5229\u7528\u675f\u641c\u7d22\u6765\u5145\u5206\u5229\u7528\u540c\u4e49\u8bcd\u7a7a\u95f4\u6765\u641c\u7d22\u6700\u4f18\u89e3\uff0c\u4ece\u800c\u589e\u52a0\u751f\u6210\u5bf9\u6297\u6837\u672c\u7684\u591a\u6837\u6027\uff1b\u5e76\u4e14\u5728\u8fed\u4ee3\u641c\u7d22\u8fc7\u7a0b\u4e2d\uff0c\u5f15\u5165\u968f\u673a\u5143\uff0c\u7528\u4e8e\u9632\u6b62\u56e0\u5bfb\u627e\u5bf9\u6297\u6837\u672c\u8fc7\u7a0b\u4e2d\u8fc7\u65e9\u6536\u655b\u800c\u9677\u5165\u5c40\u90e8\u6700\u4f18\u89e3\u7684\u56f0\u5883\u3002\u57283\u4e2a\u6570\u636e\u96c6\u5bf92\u4e2a\u6a21\u578b\u8fdb\u884c\u4e86\u5bf9\u6297\u653b\u51fb\u5b9e\u9a8c\uff0c\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u7528R-attack\u7b97\u6cd5\u80fd\u591f\u6709\u6548\u63d0\u9ad8\u5bf9\u6297\u6837\u672c\u7684\u653b\u51fb\u6210\u529f\u7387\u3002\u4ee5\u653b\u51fbYahoo! Answers\u4e0a\u8bad\u7ec3\u7684LSTM\u6a21\u578b\u4e3a\u4f8b\uff0c\u7528R-attack\u7b97\u6cd5\u653b\u51fb\u6a21\u578b\u7684\u653b\u51fb\u6210\u529f\u7387\u76f8\u6bd4\u57fa\u7ebf\u63d0\u53472.4%\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u51b3\u7b56\u8fb9\u754c\u654f\u611f\u6027\u548c\u5c0f\u6ce2\u53d8\u6362\u7684\u7535\u78c1\u4fe1\u53f7\u8c03\u5236\u667a\u80fd\u8bc6\u522b\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\u65b9\u6cd5",
+ "authors": [
+ "\u5f90\u4e1c\u4f1f1,2",
+ "\u848b\u658c1,2",
+ "\u6731\u6167\u71d51,2",
+ "\u5ba3\u74261,2",
+ "\u738b\u5dcd3",
+ "\u6797\u4e914",
+ "\u6c88\u4f1f\u56fd3",
+ "\u6768\u5c0f\u725b1,2,3"
+ ],
+ "affiliations": [
+ "1. \u6d59\u6c5f\u5de5\u4e1a\u5927\u5b66\u7f51\u7edc\u5b89\u5168\u7814\u7a76\u9662",
+ "2. \u6d59\u6c5f\u5de5\u4e1a\u5927\u5b66\u4fe1\u606f\u5de5\u7a0b\u5b66\u9662",
+ "3. \u91cd\u70b9\u7535\u78c1\u7a7a\u95f4\u5b89\u5168\u5168\u56fd\u91cd\u70b9\u5b9e\u9a8c\u5ba4",
+ "4. \u54c8\u5c14\u6ee8\u5de5\u7a0b\u5927\u5b66\u4fe1\u606f\u4e0e\u901a\u4fe1\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u6df1\u5ea6\u5b66\u4e60\u5728\u56fe\u50cf\u5206\u7c7b\u548c\u5206\u5272\u3001\u7269\u4f53\u68c0\u6d4b\u548c\u8ffd\u8e2a\u3001\u533b\u7597\u3001\u7ffb\u8bd1\u548c\u8bed\u97f3\u8bc6\u522b\u7b49\u4e0e\u4eba\u7c7b\u76f8\u5173\u7684\u4efb\u52a1\u4e2d\u53d6\u5f97\u4e86\u5de8\u5927\u7684\u6210\u529f\u3002\u5b83\u80fd\u591f\u5904\u7406\u5927\u91cf\u590d\u6742\u7684\u6570\u636e\uff0c\u5e76\u81ea\u52a8\u63d0\u53d6\u7279\u5f81\u8fdb\u884c\u9884\u6d4b\uff0c\u56e0\u6b64\u53ef\u4ee5\u66f4\u51c6\u786e\u5730\u9884\u6d4b\u7ed3\u679c\u3002\u968f\u7740\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u7684\u4e0d\u65ad\u53d1\u5c55\uff0c\u4ee5\u53ca\u53ef\u83b7\u5f97\u7684\u6570\u636e\u548c\u8ba1\u7b97\u80fd\u529b\u7684\u63d0\u9ad8\uff0c\u8fd9\u4e9b\u5e94\u7528\u7684\u51c6\u786e\u6027\u4e0d\u65ad\u63d0\u5347\u3002\u6700\u8fd1\uff0c\u6df1\u5ea6\u5b66\u4e60\u4e5f\u5728\u7535\u78c1\u4fe1\u53f7\u9886\u57df\u5f97\u5230\u4e86\u5e7f\u6cdb\u5e94\u7528\uff0c\u4f8b\u5982\u5229\u7528\u795e\u7ecf\u7f51\u7edc\u6839\u636e\u4fe1\u53f7\u7684\u9891\u57df\u548c\u65f6\u57df\u7279\u5f81\u5bf9\u5176\u8fdb\u884c\u5206\u7c7b\u3002\u4f46\u795e\u7ecf\u7f51\u7edc\u5bb9\u6613\u53d7\u5230\u5bf9\u6297\u6837\u672c\u7684\u5e72\u6270\uff0c\u8fd9\u4e9b\u5bf9\u6297\u6837\u672c\u53ef\u4ee5\u8f7b\u6613\u6b3a\u9a97\u795e\u7ecf\u7f51\u7edc\uff0c\u5bfc\u81f4\u5206\u7c7b\u9519\u8bef\u3002\u56e0\u6b64\uff0c\u5bf9\u6297\u6837\u672c\u7684\u751f\u6210\u3001\u68c0\u6d4b\u548c\u9632\u62a4\u7684\u7814\u7a76\u53d8\u5f97\u5c24\u4e3a\u91cd\u8981\uff0c\u8fd9\u5c06\u4fc3\u8fdb\u6df1\u5ea6\u5b66\u4e60\u5728\u7535\u78c1\u4fe1\u53f7\u9886\u57df\u548c\u5176\u4ed6\u9886\u57df\u7684\u53d1\u5c55\u3002\u9488\u5bf9\u73b0\u9636\u6bb5\u5355\u4e00\u7684\u68c0\u6d4b\u65b9\u6cd5\u7684\u6709\u6548\u6027\u4e0d\u9ad8\u7684\u95ee\u9898\uff0c\u63d0\u51fa\u4e86\u57fa\u4e8e\u51b3\u7b56\u8fb9\u754c\u654f\u611f\u6027\u548c\u5c0f\u6ce2\u53d8\u6362\u91cd\u6784\u7684\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\u65b9\u6cd5\u3002\u5229\u7528\u4e86\u5bf9\u6297\u6837\u672c\u4e0e\u6b63\u5e38\u6837\u672c\u5bf9\u6a21\u578b\u51b3\u7b56\u8fb9\u754c\u7684\u654f\u611f\u6027\u5dee\u5f02\u6765\u8fdb\u884c\u68c0\u6d4b\uff0c\u63a5\u7740\u9488\u5bf9\u7b2c\u4e00\u68c0\u6d4b\u9636\u6bb5\u4e2d\u672a\u68c0\u6d4b\u51fa\u7684\u5bf9\u6297\u6837\u672c\uff0c\u672c\u6587\u5229\u7528\u5c0f\u6ce2\u53d8\u6362\u5bf9\u6837\u672c\u8fdb\u884c\u91cd\u6784\uff0c\u5229\u7528\u6837\u672c\u53bb\u566a\u524d\u540e\u5728\u6a21\u578b\u4e2d\u7684\u9884\u6d4b\u503c\u5dee\u5f02\u6765\u8fdb\u884c\u68c0\u6d4b\u3002\u672c\u6587\u5728\u4e24\u79cd\u8c03\u5236\u4fe1\u53f7\u6570\u636e\u96c6\u4e0a\u8fdb\u884c\u4e86\u5b9e\u9a8c\u5206\u6790\uff0c\u5e76\u4e0e\u57fa\u7ebf\u68c0\u6d4b\u65b9\u6cd5\u8fdb\u884c\u5bf9\u6bd4\uff0c\u6b64\u65b9\u6cd5\u66f4\u4f18\u3002\u8fd9\u4e00\u7814\u7a76\u7684\u521b\u65b0\u70b9\u5728\u4e8e\u7efc\u5408\u8003\u8651\u4e86\u6a21\u578b\u51b3\u7b56\u8fb9\u754c\u7684\u654f\u611f\u6027\u548c\u5c0f\u6ce2\u53d8\u6362\u7684\u91cd\u6784\u80fd\u529b\uff0c\u901a\u8fc7\u5de7\u5999\u7684\u7ec4\u5408\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u66f4\u4e3a\u5168\u9762\u3001\u7cbe\u51c6\u7684\u5bf9\u6297\u6837\u672c\u68c0\u6d4b\u65b9\u6cd5\u3002\u8fd9\u4e3a\u6df1\u5ea6\u5b66\u4e60\u5728\u7535\u78c1\u4fe1\u53f7\u9886\u57df\u7684\u7a33\u5065\u5e94\u7528\u63d0\u4f9b\u4e86\u65b0\u7684\u601d\u8def\u548c\u65b9\u6cd5\u3002 "
+ },
+ {
+ "name": "\u9762\u5411\u667a\u80fd\u65e0\u4eba\u901a\u4fe1\u7cfb\u7edf\u7684\u56e0\u679c\u6027\u5bf9\u6297\u653b\u51fb\u751f\u6210\u7b97\u6cd5",
+ "authors": [
+ "\u79b9\u6811\u65871",
+ "\u8bb8\u5a011,2",
+ "\u59da\u5609\u94d61"
+ ],
+ "affiliations": [
+ "1. \u4e1c\u5357\u5927\u5b66\u79fb\u52a8\u901a\u4fe1\u5168\u56fd\u91cd\u70b9\u5b9e\u9a8c\u5ba4",
+ "2. \u7f51\u7edc\u901a\u4fe1\u4e0e\u5b89\u5168\u7d2b\u91d1\u5c71\u5b9e\u9a8c\u5ba4"
+ ],
+ "abstract": "\u8003\u8651\u5230\u57fa\u4e8e\u68af\u5ea6\u7684\u5bf9\u6297\u653b\u51fb\u751f\u6210\u7b97\u6cd5\u5728\u5b9e\u9645\u901a\u4fe1\u7cfb\u7edf\u90e8\u7f72\u4e2d\u9762\u4e34\u7740\u56e0\u679c\u6027\u95ee\u9898\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u56e0\u679c\u6027\u5bf9\u6297\u653b\u51fb\u751f\u6210\u7b97\u6cd5\u3002\u5229\u7528\u957f\u77ed\u671f\u8bb0\u5fc6\u7f51\u7edc\u7684\u5e8f\u5217\u8f93\u5165\u8f93\u51fa\u7279\u5f81\u4e0e\u65f6\u5e8f\u8bb0\u5fc6\u80fd\u529b\uff0c\u5728\u6ee1\u8db3\u5b9e\u9645\u5e94\u7528\u4e2d\u5b58\u5728\u7684\u56e0\u679c\u6027\u7ea6\u675f\u524d\u63d0\u4e0b\uff0c\u6709\u6548\u63d0\u53d6\u901a\u4fe1\u4fe1\u53f7\u7684\u65f6\u5e8f\u76f8\u5173\u6027\uff0c\u589e\u5f3a\u9488\u5bf9\u65e0\u4eba\u901a\u4fe1\u7cfb\u7edf\u7684\u5bf9\u6297\u653b\u51fb\u6027\u80fd\u3002\u4eff\u771f\u7ed3\u679c\u8868\u660e\uff0c\u6240\u63d0\u7b97\u6cd5\u5728\u540c\u7b49\u6761\u4ef6\u4e0b\u7684\u653b\u51fb\u6027\u80fd\u4f18\u4e8e\u6cdb\u7528\u5bf9\u6297\u6270\u52a8\u7b49\u73b0\u6709\u7684\u56e0\u679c\u6027\u5bf9\u6297\u653b\u51fb\u751f\u6210\u7b97\u6cd5\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u6f5c\u5728\u6570\u636e\u6316\u6398\u7684\u5c0f\u6837\u672c\u6570\u636e\u5e93\u5bf9\u6297\u653b\u51fb\u9632\u5fa1\u7b97\u6cd5",
+ "authors": [
+ "\u66f9\u537f"
+ ],
+ "affiliations": [
+ "\u95fd\u5357\u7406\u5de5\u5b66\u9662\u4fe1\u606f\u7ba1\u7406\u5b66\u9662"
+ ],
+ "abstract": "\u4e3a\u4e86\u964d\u4f4e\u5c0f\u6837\u672c\u6570\u636e\u5e93\u6b3a\u9a97\u7387\uff0c\u63d0\u5347\u5c0f\u6837\u672c\u6570\u636e\u5e93\u7684\u653b\u51fb\u9632\u5fa1\u6548\u679c\uff0c\u8bbe\u8ba1\u4e86\u4e00\u79cd\u57fa\u4e8e\u6f5c\u5728\u6570\u636e\u6316\u6398\u7684\u5c0f\u6837\u672c\u6570\u636e\u5e93\u5bf9\u6297\u653b\u51fb\u7684\u9632\u5fa1\u7b97\u6cd5(\u6f5c\u5728\u6570\u636e\u6316\u6398\u7684\u9632\u5fa1\u7b97\u6cd5).\u91c7\u7528\u6539\u8fdb\u7684Apriori\u7b97\u6cd5\uff0c\u901a\u8fc7\u9891\u7e41\u5c5e\u6027\u503c\u96c6\u7684\u5de5\u4f5c\u8fc7\u7a0b\u83b7\u53d6\u51c6\u786e\u7684\u5f3a\u5173\u8054\u89c4\u5219\u4f18\u52bf\uff0c\u5e76\u4ece\u5c0f\u6837\u672c\u6570\u636e\u5e93\u4e2d\u6316\u6398\u6f5c\u5728\u6570\u636e\u5bf9\u6297\u653b\u51fb\uff0c\u540c\u65f6\u4f18\u5316\u5019\u9009\u96c6\u5bfb\u627e\u9891\u7e41\u96c6\u7684\u8fc7\u7a0b\uff0c\u7136\u540e\u5229\u7528\u5173\u8054\u5206\u6790\u68c0\u6d4b\u5bf9\u6297\u653b\u51fb\uff0c\u5e76\u901a\u8fc7\u53ef\u4fe1\u5ea6\u8c03\u5ea6\u63a7\u5236\u8bbf\u95ee\u901f\u7387\u6765\u9632\u6b62\u4ea7\u751f\u6076\u610f\u4f1a\u8bdd\uff0c\u5b9e\u73b0\u5c0f\u6837\u672c\u6570\u636e\u5e93\u5bf9\u6297\u653b\u51fb\u9632\u5fa1.\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u6f5c\u5728\u6570\u636e\u6316\u6398\u7684\u9632\u5fa1\u7b97\u6cd5\u53ef\u6709\u6548\u9632\u5fa1\u5c0f\u6837\u672c\u6570\u636e\u5e93\u906d\u53d7\u7684\u591a\u79cd\u7c7b\u578b\u653b\u51fb\uff0c\u964d\u4f4e\u653b\u51fb\u4ea7\u751f\u7684\u6570\u636e\u5e93\u6b3a\u9a97\u7387\uff0c\u4fdd\u969c\u5c0f\u6837\u672c\u6570\u636e\u5e93\u670d\u52a1\u5668\u5229\u7528\u7387\u7684\u7a33\u5b9a\u6027."
+ },
+ {
+ "name": "\u57fa\u4e8e\u96c5\u53ef\u6bd4\u663e\u8457\u56fe\u7684\u7535\u78c1\u4fe1\u53f7\u5feb\u901f\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5",
+ "authors": [
+ "\u5f20\u5251",
+ "\u5468\u4fa0",
+ "\u5f20\u4e00\u7136",
+ "\u738b\u6893\u806a"
+ ],
+ "affiliations": [
+ "\u6b66\u6c49\u6570\u5b57\u5de5\u7a0b\u7814\u7a76\u6240"
+ ],
+ "abstract": "\u4e3a\u4e86\u751f\u6210\u9ad8\u8d28\u91cf\u7684\u7535\u78c1\u4fe1\u53f7\u5bf9\u6297\u6837\u672c\uff0c\u63d0\u51fa\u4e86\u5feb\u901f\u96c5\u53ef\u6bd4\u663e\u8457\u56fe\u653b\u51fb\uff08FJSMA\uff09\u65b9\u6cd5\u3002FJSMA\u901a\u8fc7\u8ba1\u7b97\u653b\u51fb\u76ee\u6807\u7c7b\u522b\u7684\u96c5\u53ef\u6bd4\u77e9\u9635\uff0c\u5e76\u6839\u636e\u8be5\u77e9\u9635\u751f\u6210\u7279\u5f81\u663e\u8457\u56fe\uff0c\u4e4b\u540e\u8fed\u4ee3\u9009\u53d6\u663e\u8457\u6027\u6700\u5f3a\u7684\u7279\u5f81\u70b9\u53ca\u5176\u90bb\u57df\u5185\u8fde\u7eed\u7279\u5f81\u70b9\u6dfb\u52a0\u6270\u52a8\uff0c\u540c\u65f6\u5f15\u5165\u5355\u70b9\u6270\u52a8\u9650\u5236\uff0c\u6700\u540e\u751f\u6210\u5bf9\u6297\u6837\u672c\u3002\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u4e0e\u96c5\u53ef\u6bd4\u663e\u8457\u56fe\u653b\u51fb\u65b9\u6cd5\u76f8\u6bd4\uff0cFJSMA\u5728\u4fdd\u6301\u4e0e\u4e4b\u76f8\u540c\u7684\u9ad8\u653b\u51fb\u6210\u529f\u7387\u7684\u540c\u65f6\uff0c\u751f\u6210\u901f\u5ea6\u63d0\u5347\u4e86\u7ea610\u500d\uff0c\u76f8\u4f3c\u5ea6\u63d0\u5347\u4e86\u8d85\u8fc711%\uff1b\u4e0e\u5176\u4ed6\u57fa\u4e8e\u68af\u5ea6\u7684\u65b9\u6cd5\u76f8\u6bd4\uff0c\u653b\u51fb\u6210\u529f\u7387\u63d0\u5347\u4e86\u8d85\u8fc720%\uff0c\u76f8\u4f3c\u5ea6\u63d0\u5347\u4e8620%\uff5e30%\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u52a8\u91cf\u8fed\u4ee3\u5feb\u901f\u68af\u5ea6\u7b26\u53f7\u7684SAR-ATR\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5",
+ "authors": [
+ "\u4e07\u70dc\u7533",
+ "\u5218\u4f1f",
+ "\u725b\u671d\u9633",
+ "\u5362\u4e07\u6770"
+ ],
+ "affiliations": [
+ "\u4e2d\u56fd\u4eba\u6c11\u89e3\u653e\u519b\u6218\u7565\u652f\u63f4\u90e8\u961f\u4fe1\u606f\u5de5\u7a0b\u5927\u5b66\u6570\u636e\u4e0e\u76ee\u6807\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u5408\u6210\u5b54\u5f84\u96f7\u8fbe\u81ea\u52a8\u76ee\u6807\u8bc6\u522b(SAR-ATR)\u9886\u57df\u7f3a\u4e4f\u6709\u6548\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\uff0c\u4e3a\u6b64\uff0c\u8be5\u6587\u7ed3\u5408\u52a8\u91cf\u8fed\u4ee3\u5feb\u901f\u68af\u5ea6\u7b26\u53f7(MI-FGSM)\u601d\u60f3\u63d0\u51fa\u4e86\u4e00\u79cd\u57fa\u4e8e\u8fc1\u79fb\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u3002\u9996\u5148\u7ed3\u5408SAR\u56fe\u50cf\u7279\u6027\u8fdb\u884c\u968f\u673a\u6591\u70b9\u566a\u58f0\u53d8\u6362\uff0c\u7f13\u89e3\u6a21\u578b\u5bf9\u6591\u70b9\u566a\u58f0\u7684\u8fc7\u62df\u5408\uff0c\u63d0\u9ad8\u7b97\u6cd5\u7684\u6cdb\u5316\u6027\u80fd\uff1b\u7136\u540e\u8bbe\u8ba1\u4e86\u80fd\u591f\u5feb\u901f\u5bfb\u627e\u6700\u4f18\u68af\u5ea6\u4e0b\u964d\u65b9\u5411\u7684ABN\u5bfb\u4f18\u5668\uff0c\u901a\u8fc7\u6a21\u578b\u68af\u5ea6\u5feb\u901f\u6536\u655b\u63d0\u5347\u7b97\u6cd5\u653b\u51fb\u6709\u6548\u6027\uff1b\u6700\u540e\u5f15\u5165\u62df\u53cc\u66f2\u52a8\u91cf\u7b97\u5b50\u83b7\u5f97\u7a33\u5b9a\u7684\u6a21\u578b\u68af\u5ea6\u4e0b\u964d\u65b9\u5411\uff0c\u4f7f\u68af\u5ea6\u5728\u5feb\u901f\u6536\u655b\u8fc7\u7a0b\u4e2d\u907f\u514d\u9677\u5165\u5c40\u90e8\u6700\u4f18\uff0c\u8fdb\u4e00\u6b65\u589e\u5f3a\u5bf9\u6297\u6837\u672c\u7684\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u3002\u901a\u8fc7\u4eff\u771f\u5b9e\u9a8c\u8868\u660e\uff0c\u4e0e\u73b0\u6709\u7684\u5bf9\u6297\u653b\u51fb\u7b97\u6cd5\u76f8\u6bd4\uff0c\u8be5\u6587\u7b97\u6cd5\u5728MSTAR\u548cFUSAR-Ship\u6570\u636e\u96c6\u4e0a\u5bf9\u4e3b\u6d41\u7684SAR-ATR\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u7684\u96c6\u6210\u6a21\u578b\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u5206\u522b\u63d0\u9ad8\u4e863%\uff5e55%\u548c6%\uff5e57.5%\uff0c\u800c\u4e14\u751f\u6210\u7684\u5bf9\u6297\u6837\u672c\u5177\u6709\u9ad8\u5ea6\u7684\u9690\u853d\u6027\u3002"
+ },
+ {
+ "name": "\u9762\u5411\u56fe\u50cf\u5206\u6790\u9886\u57df\u7684\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u6280\u672f\u7efc\u8ff0",
+ "authors": [
+ "\u6b66\u9633",
+ "\u5218\u9756"
+ ],
+ "affiliations": [
+ "\u5185\u8499\u53e4\u5927\u5b66\u8ba1\u7b97\u673a\u5b66\u9662"
+ ],
+ "abstract": "\u56fe\u50cf\u9886\u57df\u4e0b\u7684\u9ed1\u76d2\u653b\u51fb\uff08Black-box Attack\uff09\u5df2\u6210\u4e3a\u5f53\u524d\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u5bf9\u6297\u653b\u51fb\u9886\u57df\u7684\u70ed\u70b9\u7814\u7a76\u65b9\u5411\u3002\u9ed1\u76d2\u653b\u51fb\u7684\u7279\u70b9\u5728\u4e8e\u4ec5\u5229\u7528\u6a21\u578b\u8f93\u5165\u4e0e\u8f93\u51fa\u7684\u6620\u5c04\u5173\u7cfb\uff0c\u800c\u65e0\u9700\u6a21\u578b\u5185\u90e8\u53c2\u6570\u4fe1\u606f\u53ca\u68af\u5ea6\u4fe1\u606f\uff0c\u901a\u8fc7\u5411\u56fe\u50cf\u6570\u636e\u52a0\u5165\u4eba\u7c7b\u96be\u4ee5\u5bdf\u89c9\u7684\u5fae\u5c0f\u6270\u52a8\uff0c\u8fdb\u800c\u9020\u6210\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\uff08Deep Neural Network\uff0c DNN\uff09\u63a8\u7406\u4e0e\u8bc6\u522b\u5931\u51c6\uff0c\u5bfc\u81f4\u56fe\u50cf\u5206\u6790\u4efb\u52a1\u7684\u51c6\u786e\u7387\u4e0b\u964d\uff0c\u56e0\u6b64\u7531\u9ed1\u76d2\u653b\u51fb\u5f15\u8d77\u7684\u9c81\u68d2\u6027\u95ee\u9898\u6210\u4e3a\u5f53\u524dDNN\u6a21\u578b\u7814\u7a76\u7684\u5173\u952e\u95ee\u9898\u3002\u4e3a\u63d0\u9ad8\u9ed1\u76d2\u653b\u51fb\u5728\u56fe\u50cf\u5206\u6790\u4efb\u52a1\u4e0b\u7684\u653b\u51fb\u6210\u6548\uff0c\u73b0\u6709\u76f8\u5173\u7814\u7a76\u4ee5\u4f4e\u67e5\u8be2\u6b21\u6570\u3001\u4f4e\u6270\u52a8\u5e45\u5ea6\u3001\u9ad8\u653b\u51fb\u6210\u529f\u7387\u4f5c\u4e3a\u4f18\u5316\u76ee\u6807\uff0c\u9488\u5bf9\u4e0d\u540c\u56fe\u50cf\u5206\u6790\u4efb\u52a1\u91c7\u7528\u4e0d\u540c\u7684\u653b\u51fb\u6a21\u5f0f\u4e0e\u8bc4\u4f30\u65b9\u5f0f\u3002\u672c\u6587\u4ee5\u4e3b\u6d41\u7684\u56fe\u50cf\u5206\u6790\u4efb\u52a1\u4e3a\u51fa\u53d1\u70b9\uff0c\u9610\u8ff0\u56fe\u50cf\u5206\u7c7b\u3001\u76ee\u6807\u68c0\u6d4b\u4e0e\u56fe\u50cf\u5206\u5272\u4e09\u7c7b\u4efb\u52a1\u4e2d\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u7684\u6838\u5fc3\u601d\u60f3\u548c\u96be\u70b9\uff0c\u603b\u7ed3\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u9886\u57df\u4e2d\u7684\u5173\u952e\u6982\u5ff5\u4e0e\u8bc4\u4f30\u6307\u6807\uff0c\u5206\u6790\u4e0d\u540c\u56fe\u50cf\u5206\u6790\u4efb\u52a1\u4e2d\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u7684\u5b9e\u73b0\u7b56\u7565\u4e0e\u7814\u7a76\u76ee\u6807\u3002\u9610\u660e\u5404\u4e2a\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u95f4\u7684\u5173\u7cfb\u4e0e\u4f18\u52bf\uff0c\u4ece\u653b\u51fb\u6210\u529f\u7387\u3001\u67e5\u8be2\u6b21\u6570\uff0c\u4ee5\u53ca\u76f8\u4f3c\u6027\u5ea6\u91cf\u7b49\u591a\u4e2a\u65b9\u9762\u5bf9\u4e0d\u540c\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u8fdb\u884c\u6027\u80fd\u6bd4\u8f83\uff0c\u4ee5\u63d0\u51fa\u76ee\u524d\u56fe\u50cf\u5206\u6790\u9886\u57df\u4e2d\u9ed1\u76d2\u5bf9\u6297\u653b\u51fb\u4ecd\u5b58\u5728\u7684\u4e3b\u8981\u6311\u6218\u4e0e\u672a\u6765\u7814\u7a76\u65b9\u5411\u3002"
+ },
+ {
+ "name": "\u7164\u77ff\u4e95\u4e0b\u94bb\u8fdb\u901f\u5ea6\u5f71\u54cd\u56e0\u7d20\u53ca\u5176\u667a\u80fd\u9884\u6d4b\u65b9\u6cd5\u7814\u7a76",
+ "authors": [
+ "\u6234\u5251\u535a1",
+ "\u738b\u5fe0\u5bbe1",
+ "\u5f20\u74301",
+ "\u53f8\u57921",
+ "\u9b4f\u4e1c1",
+ "\u5468\u6587\u535a2",
+ "\u987e\u8fdb\u60521",
+ "\u90b9\u7b71\u745c1",
+ "\u5b8b\u96e8\u96e82"
+ ],
+ "affiliations": [
+ "1. \u4e2d\u56fd\u77ff\u4e1a\u5927\u5b66\u673a\u68b0\u5de5\u7a0b\u5b66\u9662",
+ "2. \u56db\u5ddd\u822a\u5929\u7cfb\u7edf\u5de5\u7a0b\u7814\u7a76\u6240"
+ ],
+ "abstract": "\u5728\u7164\u77ff\u4e95\u4e0b\u94bb\u63a2\u9886\u57df\uff0c\u94bb\u8fdb\u901f\u5ea6(DR)\u662f\u8bc4\u4f30\u94bb\u63a2\u4f5c\u4e1a\u6700\u6709\u6548\u6307\u6807\u4e4b\u4e00\uff0c\u94bb\u901f\u9884\u6d4b\u662f\u5b9e\u73b0\u7164\u77ff\u94bb\u8fdb\u667a\u80fd\u5316\u7684\u524d\u63d0\u6761\u4ef6\uff0c\u5bf9\u4e8e\u4f18\u5316\u94bb\u673a\u94bb\u8fdb\u53c2\u6570\u3001\u964d\u4f4e\u4f5c\u4e1a\u6210\u672c\u3001\u5b9e\u73b0\u5b89\u5168\u9ad8\u6548\u94bb\u63a2\u5177\u6709\u91cd\u8981\u610f\u4e49\u3002\u4e3a\u6b64\uff0c\u63d0\u51fa\u7164\u77ff\u4e95\u4e0b\u94bb\u8fdb\u901f\u5ea6\u5f71\u54cd\u56e0\u7d20\u53ca\u5176\u667a\u80fd\u9884\u6d4b\u65b9\u6cd5\u7814\u7a76\uff0c\u63a2\u7d22\u57fa\u4e8e\u94bb\u538b\u3001\u8f6c\u901f\u3001\u626d\u77e9\u4ee5\u53ca\u94bb\u8fdb\u6df1\u5ea6\u7b49\u5c11\u91cf\u94bb\u673a\u53c2\u6570\u91c7\u7528\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u5b9e\u73b0\u94bb\u8fdb\u901f\u5ea6\u7cbe\u51c6\u9884\u6d4b\u3002\u9996\u5148\u901a\u8fc7\u5b9e\u9a8c\u5ba4\u5fae\u94bb\u8bd5\u9a8c\uff0c\u6df1\u5165\u5206\u6790\u7164\u5ca9\u529b\u5b66\u6027\u80fd\u3001\u94bb\u538b\u3001\u8f6c\u901f\u548c\u94bb\u8fdb\u6df1\u5ea6\u5bf9\u626d\u77e9\u3001\u94bb\u8fdb\u901f\u5ea6\u5f71\u54cd\u89c4\u5f8b\u3002\u7814\u7a76\u7ed3\u679c\u663e\u793a\uff0c\u5728\u7164\u77ff\u4e95\u4e0b\u94bb\u8fdb\u8fc7\u7a0b\u4e2d\uff0c\u968f\u7740\u94bb\u8fdb\u538b\u529b\u589e\u5927\uff0c\u94bb\u8fdb\u901f\u5ea6\u5448\u9010\u6e10\u5347\u9ad8\u8d8b\u52bf\uff0c\u5728\u8f83\u9ad8\u7684\u8f6c\u901f\u6761\u4ef6\u4e0b\u94bb\u8fdb\u538b\u529b\u5bf9\u94bb\u8fdb\u901f\u5ea6\u5f71\u54cd\u66f4\u52a0\u660e\u663e\uff0c\u8f6c\u901f\u589e\u52a0\u6709\u5229\u4e8e\u63d0\u9ad8\u94bb\u8fdb\u901f\u5ea6\uff0c\u4f46\u8f6c\u901f\u5bf9\u786c\u5ea6\u8f83\u4f4e\u7684\u7164\u5c42\u94bb\u8fdb\u901f\u5ea6\u5f71\u54cd\u66f4\u4e3a\u663e\u8457\uff1b\u7136\u540e\uff0c\u6839\u636e\u7164\u77ff\u4e95\u4e0b\u9632\u51b2\u94bb\u5b54\u73b0\u573a\u6570\u636e\uff0c\u91c7\u7528K-\u8fd1\u90bb(KNN)\u3001\u652f\u6301\u5411\u91cf\u56de\u5f52(SVR)\u548c\u968f\u673a\u68ee\u6797\u56de\u5f52(RFR)\u4e09\u79cd\u4e0d\u540c\u7684\u673a\u5668\u5b66\u4e60\u7b97\u6cd5\u5efa\u7acb\u94bb\u8fdb\u901f\u5ea6\u9884\u6d4b\u6a21\u578b\uff0c\u5e76\u7ed3\u5408\u7c92\u5b50\u7fa4\u7b97\u6cd5(PSO)\u5bf9\u4e09\u79cd\u6a21\u578b\u8d85\u53c2\u6570\u8fdb\u884c\u4f18\u5316\uff0c\u6700\u540e\u5bf9\u6bd4\u5206\u6790PSO-KNN\u3001PSO-SVR\u548cPSO-RFR\u4e09\u79cd\u94bb\u8fdb\u901f\u5ea6\u9884\u6d4b\u6a21\u578b\u9884\u6d4b\u7ed3\u679c\u3002\u7814\u7a76\u7ed3\u679c\u8868\u660e\uff0cPSO-RFR\u6a21\u578b\u51c6\u786e\u6027\u6700\u597d\uff0c\u51b3\u5b9a\u7cfb\u6570R2\u9ad8\u8fbe0.963\uff0c\u5747\u65b9\u8bef\u5deeMSE\u4ec5\u670929.742\uff0c\u800cPSO-SVR\u6a21\u578b\u9c81\u68d2\u6027\u6700\u597d\uff0c\u5728\u5bf9\u6297\u653b\u51fb\u540e\u8bc4\u4ef7\u6307\u6807\u53d8\u5316\u7387\u6700\u5c0f\u3002\u672c\u6587\u7814\u7a76\u6709\u52a9\u4e8e\u5b9e\u73b0\u7164\u77ff\u4e95\u4e0b\u94bb\u8fdb\u901f\u5ea6\u7684\u7cbe\u51c6\u9884\u6d4b\uff0c\u4e3a\u7164\u77ff\u4e95\u4e0b\u667a\u80fd\u94bb\u8fdb\u53c2\u6570\u4f18\u5316\u63d0\u4f9b\u7406\u8bba\u652f\u6491\u3002 "
+ },
+ {
+ "name": "\u9488\u5bf9\u76ee\u6807\u68c0\u6d4b\u6a21\u578b\u7684\u7269\u7406\u5bf9\u6297\u653b\u51fb\u7efc\u8ff0",
+ "authors": [
+ "\u8521\u4f1f",
+ "\u72c4\u661f\u96e8",
+ "\u848b\u6615\u660a",
+ "\u738b\u946b",
+ "\u9ad8\u851a\u6d01"
+ ],
+ "affiliations": [
+ "\u706b\u7bad\u519b\u5de5\u7a0b\u5927\u5b66\u5bfc\u5f39\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u5bb9\u6613\u53d7\u5230\u5bf9\u6297\u6837\u672c\u7684\u5f71\u54cd\uff0c\u5728\u56fe\u50cf\u4e0a\u6dfb\u52a0\u8089\u773c\u4e0d\u53ef\u89c1\u7684\u5fae\u5c0f\u6270\u52a8\u5c31\u53ef\u4ee5\u4f7f\u8bad\u7ec3\u6709\u7d20\u7684\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u5931\u7075\u3002\u6700\u8fd1\u7684\u7814\u7a76\u8868\u660e\u8fd9\u79cd\u6270\u52a8\u4e5f\u5b58\u5728\u4e8e\u73b0\u5b9e\u4e16\u754c\u4e2d\u3002\u805a\u7126\u4e8e\u6df1\u5ea6\u5b66\u4e60\u76ee\u6807\u68c0\u6d4b\u6a21\u578b\u7684\u7269\u7406\u5bf9\u6297\u653b\u51fb\uff0c\u660e\u786e\u4e86\u7269\u7406\u5bf9\u6297\u653b\u51fb\u7684\u6982\u5ff5\uff0c\u5e76\u4ecb\u7ecd\u4e86\u76ee\u6807\u68c0\u6d4b\u7269\u7406\u5bf9\u6297\u653b\u51fb\u7684\u4e00\u822c\u6d41\u7a0b\uff0c\u4f9d\u636e\u653b\u51fb\u4efb\u52a1\u7684\u4e0d\u540c\u4ece\u8f66\u8f86\u68c0\u6d4b\u548c\u884c\u4eba\u68c0\u6d4b\u7efc\u8ff0\u4e86\u8fd1\u5e74\u6765\u4e00\u7cfb\u5217\u9488\u5bf9\u76ee\u6807\u68c0\u6d4b\u7f51\u7edc\u7684\u7269\u7406\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\uff0c\u4ee5\u53ca\u7b80\u5355\u4ecb\u7ecd\u4e86\u5176\u4ed6\u9488\u5bf9\u76ee\u6807\u68c0\u6d4b\u6a21\u578b\u7684\u653b\u51fb\u3001\u5176\u4ed6\u653b\u51fb\u4efb\u52a1\u548c\u5176\u4ed6\u653b\u51fb\u65b9\u5f0f\u3002\u6700\u540e\uff0c\u8ba8\u8bba\u4e86\u7269\u7406\u5bf9\u6297\u653b\u51fb\u5f53\u524d\u9762\u4e34\u7684\u6311\u6218\uff0c\u5f15\u51fa\u5bf9\u6297\u8bad\u7ec3\u7684\u5c40\u9650\u6027\u5e76\u5c55\u671b\u672a\u6765\u53ef\u80fd\u7684\u53d1\u5c55\u65b9\u5411\u548c\u5e94\u7528\u524d\u666f\u3002"
+ },
+ {
+ "name": "\u9488\u5bf9\u81ea\u52a8\u9a7e\u9a76\u667a\u80fd\u6a21\u578b\u7684\u653b\u51fb\u4e0e\u9632\u5fa1",
+ "authors": [
+ "\u9a6c\u66681,2",
+ "\u6c88\u8d851,2",
+ "\u853a\u741b\u76931,2",
+ "\u674e\u524d1,2",
+ "\u738b\u9a9e3",
+ "\u674e\u74264",
+ "\u7ba1\u6653\u5b8f1,2"
+ ],
+ "affiliations": [
+ "1. \u897f\u5b89\u4ea4\u901a\u5927\u5b66\u7535\u5b50\u4e0e\u4fe1\u606f\u5b66\u90e8\u7f51\u7edc\u7a7a\u95f4\u5b89\u5168\u5b66\u9662",
+ "2. \u667a\u80fd\u7f51\u7edc\u4e0e\u7f51\u7edc\u5b89\u5168\u6559\u80b2\u90e8\u91cd\u70b9\u5b9e\u9a8c\u5ba4(\u897f\u5b89\u4ea4\u901a\u5927\u5b66)",
+ "3. \u6b66\u6c49\u5927\u5b66\u56fd\u5bb6\u7f51\u7edc\u5b89\u5168\u5b66\u9662",
+ "4. \u6e05\u534e\u5927\u5b66\u7f51\u7edc\u79d1\u5b66\u4e0e\u7f51\u7edc\u7a7a\u95f4\u7814\u7a76\u9662"
+ ],
+ "abstract": "\u8fd1\u5e74\u6765\uff0c\u4ee5\u6df1\u5ea6\u5b66\u4e60\u7b97\u6cd5\u4e3a\u4ee3\u8868\u7684\u4eba\u5de5\u667a\u80fd\u6280\u672f\u4e3a\u4eba\u7c7b\u751f\u4ea7\u751f\u6d3b\u7684\u65b9\u65b9\u9762\u9762\u5e26\u6765\u4e86\u5de8\u5927\u7684\u9769\u65b0\uff0c\u5c24\u5176\u662f\u5728\u81ea\u52a8\u9a7e\u9a76\u9886\u57df\uff0c\u90e8\u7f72\u7740\u81ea\u52a8\u9a7e\u9a76\u7cfb\u7edf\u7684\u667a\u80fd\u6c7d\u8f66\u5df2\u7ecf\u8d70\u8fdb\u4eba\u4eec\u7684\u751f\u6d3b\uff0c\u6210\u4e3a\u4e86\u91cd\u8981\u7684\u751f\u4ea7\u529b\u5de5\u5177\u3002\u7136\u800c\uff0c\u81ea\u52a8\u9a7e\u9a76\u7cfb\u7edf\u4e2d\u7684\u4eba\u5de5\u667a\u80fd\u6a21\u578b\u9762\u4e34\u7740\u6f5c\u5728\u7684\u5b89\u5168\u9690\u60a3\u548c\u98ce\u9669\uff0c\u8fd9\u7ed9\u4eba\u6c11\u7fa4\u4f17\u751f\u547d\u8d22\u4ea7\u5b89\u5168\u5e26\u6765\u4e86\u4e25\u91cd\u5a01\u80c1\u3002\u672c\u6587\u901a\u8fc7\u56de\u987e\u81ea\u52a8\u9a7e\u9a76\u667a\u80fd\u6a21\u578b\u653b\u51fb\u548c\u9632\u5fa1\u7684\u76f8\u5173\u7814\u7a76\u5de5\u4f5c\uff0c\u63ed\u793a\u81ea\u52a8\u9a7e\u9a76\u7cfb\u7edf\u5728\u7269\u7406\u4e16\u754c\u4e0b\u9762\u4e34\u7684\u5b89\u5168\u98ce\u9669\u5e76\u5f52\u7eb3\u603b\u7ed3\u4e86\u76f8\u5e94\u7684\u9632\u5fa1\u5bf9\u7b56\u3002\u5177\u4f53\u6765\u8bf4\uff0c\u672c\u6587\u9996\u5148\u4ecb\u7ecd\u4e86\u5305\u542b\u653b\u51fb\u9762\u3001\u653b\u51fb\u80fd\u529b\u548c\u653b\u51fb\u76ee\u6807\u7684\u81ea\u52a8\u9a7e\u9a76\u7cfb\u7edf\u5b89\u5168\u98ce\u9669\u6a21\u578b\u3002\u5176\u6b21\uff0c\u9762\u5411\u81ea\u52a8\u9a7e\u9a76\u7cfb\u7edf\u7684\u4e09\u4e2a\u5173\u952e\u529f\u80fd\u5c42\u2014\u2014\u4f20\u611f\u5668\u5c42\u3001\u611f\u77e5\u5c42\u548c\u51b3\u7b56\u5c42\uff0c\u672c\u6587\u4f9d\u636e\u53d7\u653b\u51fb\u7684\u667a\u80fd\u6a21\u578b\u548c\u653b\u51fb\u624b\u6bb5\u5f52\u7eb3\u3001\u5206\u6790\u4e86\u5bf9\u5e94\u7684\u653b\u51fb\u65b9\u6cd5\u4ee5\u53ca\u9632\u5fa1\u5bf9\u7b56\uff0c\u5e76\u63a2\u8ba8\u4e86\u73b0\u6709\u65b9\u6cd5\u7684\u5c40\u9650\u6027\u3002\u6700\u540e\uff0c\u672c\u6587\u8ba8\u8bba\u548c\u5c55\u671b\u4e86\u81ea\u52a8\u9a7e\u9a76\u667a\u80fd\u6a21\u578b\u653b\u51fb\u4e0e\u9632\u5fa1\u6280\u672f\u9762\u4e34\u7684\u96be\u9898\u4e0e\u6311\u6218\uff0c\u5e76\u6307\u51fa\u4e86\u672a\u6765\u6f5c\u5728\u7684\u7814\u7a76\u65b9\u5411\u548c\u53d1\u5c55\u8d8b\u52bf."
+ },
+ {
+ "name": "\u9690\u79c1\u4fdd\u62a4\u7684\u56fe\u50cf\u66ff\u4ee3\u6570\u636e\u751f\u6210\u65b9\u6cd5",
+ "authors": [
+ "\u674e\u5a49\u83b91,2",
+ "\u5218\u5b66\u82731,2",
+ "\u6768\u535a1,2"
+ ],
+ "affiliations": [
+ "1. \u5409\u6797\u5927\u5b66\u8ba1\u7b97\u673a\u79d1\u5b66\u4e0e\u6280\u672f\u5b66\u9662",
+ "2. \u5409\u6797\u5927\u5b66\u7b26\u53f7\u8ba1\u7b97\u4e0e\u77e5\u8bc6\u5de5\u7a0b\u6559\u80b2\u90e8\u91cd\u70b9\u5b9e\u9a8c\u5ba4"
+ ],
+ "abstract": "\u9488\u5bf9\u73b0\u6709\u56fe\u50cf\u6570\u636e\u96c6\u5b58\u5728\u7684\u9690\u79c1\u4fdd\u62a4\u9700\u6c42\uff0c\u63d0\u51fa\u4e00\u79cd\u56fe\u50cf\u6570\u636e\u96c6\u9690\u79c1\u4fdd\u62a4\u573a\u666f\u53ca\u8be5\u573a\u666f\u4e0b\u9690\u79c1\u4fdd\u62a4\u7684\u56fe\u50cf\u66ff\u4ee3\u6570\u636e\u751f\u6210\u65b9\u6cd5\u3002\u8be5\u573a\u666f\u5229\u7528\u7ecf\u9690\u79c1\u4fdd\u62a4\u65b9\u6cd5\u5904\u7406\u540e\u7684\u66ff\u4ee3\u56fe\u50cf\u6570\u636e\u96c6\u53d6\u4ee3\u539f\u59cb\u56fe\u50cf\u6570\u636e\u96c6\uff0c\u5176\u4e2d\u66ff\u4ee3\u56fe\u50cf\u4e0e\u539f\u59cb\u56fe\u50cf\u4e00\u4e00\u5bf9\u5e94\uff0c\u4eba\u7c7b\u65e0\u6cd5\u8bc6\u522b\u66ff\u4ee3\u56fe\u50cf\u6240\u5c5e\u7c7b\u522b\uff0c\u66ff\u4ee3\u56fe\u50cf\u53ef\u8bad\u7ec3\u73b0\u6709\u7684\u6df1\u5ea6\u5b66\u4e60\u56fe\u50cf\u5206\u7c7b\u7b97\u6cd5\uff0c\u4e14\u5177\u6709\u8f83\u597d\u7684\u5206\u7c7b\u6548\u679c\u3002\u540c\u65f6\u9488\u5bf9\u4e0a\u8ff0\u573a\u666f\uff0c\u6539\u8fdb\u4e86\u57fa\u4e8e\u6295\u5f71\u68af\u5ea6\u4e0b\u964d(PGD:Project Gradient Descent)\u653b\u51fb\u7684\u6570\u636e\u9690\u79c1\u4fdd\u62a4\u65b9\u6cd5\uff0c\u5c06\u539f\u59cbPGD\u653b\u51fb\u76ee\u6807\u7531\u6807\u7b7e\u6539\u4e3a\u56fe\u50cf\uff0c\u5373\u56fe\u50cf\u5bf9\u56fe\u50cf\u7684\u653b\u51fb\uff0c\u5e76\u4f7f\u7528\u7ecf\u8fc7\u5bf9\u6297\u8bad\u7ec3\u7684\u9c81\u68d2\u6a21\u578b\u8fdb\u884c\u56fe\u50cf\u5bf9\u56fe\u50cf\u653b\u51fb\u4f5c\u4e3a\u66ff\u4ee3\u6570\u636e\u7684\u751f\u6210\u65b9\u6cd5\u3002\u5728\u6807\u51c6\u6d4b\u8bd5\u96c6\u4e0a\uff0c\u66ff\u4ee3\u540e\u7684CIFAR(Canadian Institute For Advanced Research 10)\u6570\u636e\u96c6\u548cCINIC\u6570\u636e\u96c6\u5728\u56fe\u50cf\u5206\u7c7b\u4efb\u52a1\u4e0a\u5206\u522b\u53d6\u5f97\u4e8687.15%\u548c74.04%\u7684\u6d4b\u8bd5\u6b63\u786e\u7387\u3002\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u8be5\u65b9\u6cd5\u80fd\u5728\u4fdd\u8bc1\u66ff\u4ee3\u6570\u636e\u96c6\u5bf9\u4eba\u7c7b\u9690\u79c1\u6027\u7684\u524d\u63d0\u4e0b\uff0c\u751f\u6210\u539f\u59cb\u6570\u636e\u96c6\u7684\u66ff\u4ee3\u6570\u636e\u96c6\uff0c\u5e76\u4fdd\u8bc1\u73b0\u6709\u65b9\u6cd5\u5728\u8be5\u6570\u636e\u96c6\u4e0a\u7684\u5206\u7c7b\u6027\u80fd\u3002"
+ },
+ {
+ "name": "\u7ed3\u5408\u81ea\u9002\u5e94\u6b65\u957f\u7b56\u7565\u548c\u6570\u636e\u589e\u5f3a\u673a\u5236\u63d0\u5347\u5bf9\u6297\u653b\u51fb\u8fc1\u79fb\u6027",
+ "authors": [
+ "\u9c8d\u857e1",
+ "\u9676\u851a2",
+ "\u9676\u537f1"
+ ],
+ "affiliations": [
+ "1. \u4e2d\u56fd\u4eba\u6c11\u89e3\u653e\u519b\u9646\u519b\u70ae\u5175\u9632\u7a7a\u5175\u5b66\u9662\u4fe1\u606f\u5de5\u7a0b\u7cfb",
+ "2. \u4e2d\u56fd\u4eba\u6c11\u89e3\u653e\u519b\u519b\u4e8b\u79d1\u5b66\u9662"
+ ],
+ "abstract": "\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc\u5177\u6709\u8106\u5f31\u6027\uff0c\u5bb9\u6613\u88ab\u7cbe\u5fc3\u8bbe\u8ba1\u7684\u5bf9\u6297\u6837\u672c\u653b\u51fb.\u68af\u5ea6\u653b\u51fb\u65b9\u6cd5\u5728\u767d\u76d2\u6a21\u578b\u4e0a\u653b\u51fb\u6210\u529f\u7387\u8f83\u9ad8\uff0c\u4f46\u5728\u9ed1\u76d2\u6a21\u578b\u4e0a\u7684\u8fc1\u79fb\u6027\u8f83\u5f31.\u57fa\u4e8eHeavy-ball\u578b\u52a8\u91cf\u548cNesterov\u578b\u52a8\u91cf\u7684\u68af\u5ea6\u653b\u51fb\u65b9\u6cd5\u7531\u4e8e\u5728\u66f4\u65b0\u65b9\u5411\u4e0a\u8003\u8651\u4e86\u5386\u53f2\u68af\u5ea6\u4fe1\u606f\uff0c\u63d0\u5347\u4e86\u5bf9\u6297\u6837\u672c\u7684\u8fc1\u79fb\u6027.\u4e3a\u4e86\u8fdb\u4e00\u6b65\u4f7f\u7528\u5386\u53f2\u68af\u5ea6\u4fe1\u606f\uff0c\u672c\u6587\u9488\u5bf9\u6536\u655b\u6027\u66f4\u597d\u7684Nesterov\u578b\u52a8\u91cf\u65b9\u6cd5\uff0c\u4f7f\u7528\u81ea\u9002\u5e94\u6b65\u957f\u7b56\u7565\u4ee3\u66ff\u76ee\u524d\u5e7f\u6cdb\u4f7f\u7528\u7684\u56fa\u5b9a\u6b65\u957f\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u65b9\u5411\u548c\u6b65\u957f\u5747\u4f7f\u7528\u5386\u53f2\u68af\u5ea6\u4fe1\u606f\u7684\u8fed\u4ee3\u5feb\u901f\u68af\u5ea6\u65b9\u6cd5\uff08Nesterov and Adaptive-learning-rate based Iterative Fast Gradient Method,NAI-FGM\uff09.\u6b64\u5916\uff0c\u672c\u6587\u8fd8\u63d0\u51fa\u4e86\u4e00\u79cd\u7ebf\u6027\u53d8\u6362\u4e0d\u53d8\u6027\uff08Linear-transformation Invariant Method,LIM\uff09\u7684\u6570\u636e\u589e\u5f3a\u65b9\u6cd5 .\u5b9e\u9a8c\u7ed3\u679c\u8bc1\u5b9e\u4e86NAI-FGM\u653b\u51fb\u65b9\u6cd5\u548cLIM\u6570\u636e\u589e\u5f3a\u7b56\u7565\u76f8\u5bf9\u4e8e\u540c\u7c7b\u578b\u65b9\u6cd5\u5747\u5177\u6709\u66f4\u9ad8\u7684\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387.\u7ec4\u5408NAI-FGM\u65b9\u6cd5\u548cLIM\u7b56\u7565\u751f\u6210\u5bf9\u6297\u6837\u672c\uff0c\u5728\u5e38\u89c4\u8bad\u7ec3\u6a21\u578b\u4e0a\u7684\u5e73\u5747\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u8fbe\u523087.8%\uff0c\u5728\u5bf9\u6297\u8bad\u7ec3\u6a21\u578b\u4e0a\u7684\u5e73\u5747\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u8fbe\u523057.5%\uff0c\u5728\u9632\u5fa1\u6a21\u578b\u4e0a\u7684\u5e73\u5747\u9ed1\u76d2\u653b\u51fb\u6210\u529f\u7387\u8fbe\u523067.2%\uff0c\u5747\u8d85\u8fc7\u73b0\u6709\u6700\u9ad8\u6c34\u5e73. "
+ },
+ {
+ "name": "\u9488\u5bf9\u8eab\u4efd\u8bc1\u6587\u672c\u8bc6\u522b\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u7814\u7a76",
+ "authors": [
+ "\u5f90\u660c\u51ef1,2",
+ "\u51af\u536b\u680b1,2",
+ "\u5f20\u6df3\u67701,2",
+ "\u90d1\u6653\u9f993,4,5",
+ "\u5f20\u8f896",
+ "\u738b\u98de\u8dc33,4,5"
+ ],
+ "affiliations": [
+ "1. \u5317\u4eac\u4ea4\u901a\u5927\u5b66\u8ba1\u7b97\u673a\u4e0e\u4fe1\u606f\u6280\u672f\u5b66\u9662\u4fe1\u606f\u79d1\u5b66\u7814\u7a76\u6240",
+ "2. \u73b0\u4ee3\u4fe1\u606f\u79d1\u5b66\u4e0e\u7f51\u7edc\u6280\u672f\u5317\u4eac\u5e02\u91cd\u70b9\u5b9e\u9a8c\u5ba4",
+ "3. \u4e2d\u56fd\u79d1\u5b66\u9662\u81ea\u52a8\u5316\u7814\u7a76\u6240\u591a\u6a21\u6001\u4eba\u5de5\u667a\u80fd\u7cfb\u7edf\u5168\u56fd\u91cd\u70b9\u5b9e\u9a8c\u5ba4",
+ "4. \u4e2d\u56fd\u79d1\u5b66\u9662\u81ea\u52a8\u5316\u7814\u7a76\u6240\u590d\u6742\u7cfb\u7edf\u7ba1\u7406\u4e0e\u63a7\u5236\u56fd\u5bb6\u91cd\u70b9\u5b9e\u9a8c\u5ba4",
+ "5. \u4e2d\u56fd\u79d1\u5b66\u9662\u5927\u5b66\u4eba\u5de5\u667a\u80fd\u5b66\u9662",
+ "6. \u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66\u4ea4\u901a\u79d1\u5b66\u4e0e\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u8eab\u4efd\u8bc1\u8ba4\u8bc1\u573a\u666f\u591a\u91c7\u7528\u6587\u672c\u8bc6\u522b\u6a21\u578b\u5bf9\u8eab\u4efd\u8bc1\u56fe\u7247\u7684\u5b57\u6bb5\u8fdb\u884c\u63d0\u53d6\u3001\u8bc6\u522b\u548c\u8eab\u4efd\u8ba4\u8bc1,\u5b58\u5728\u5f88\u5927\u7684\u9690\u79c1\u6cc4\u9732\u9690\u60a3.\u5e76\u4e14,\u5f53\u524d\u57fa\u4e8e\u6587\u672c\u8bc6\u522b\u6a21\u578b\u7684\u5bf9\u6297\u653b\u51fb\u7b97\u6cd5\u5927\u591a\u53ea\u8003\u8651\u7b80\u5355\u80cc\u666f\u7684\u6570\u636e(\u5982\u5370\u5237\u4f53)\u548c\u767d\u76d2\u6761\u4ef6,\u5f88\u96be\u5728\u7269\u7406\u4e16\u754c\u8fbe\u5230\u7406\u60f3\u7684\u653b\u51fb\u6548\u679c,\u4e0d\u9002\u7528\u4e8e\u590d\u6742\u80cc\u666f\u3001\u6570\u636e\u53ca\u9ed1\u76d2\u6761\u4ef6.\u4e3a\u7f13\u89e3\u4e0a\u8ff0\u95ee\u9898,\u672c\u6587\u63d0\u51fa\u9488\u5bf9\u8eab\u4efd\u8bc1\u6587\u672c\u8bc6\u522b\u6a21\u578b\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5,\u8003\u8651\u8f83\u4e3a\u590d\u6742\u7684\u56fe\u50cf\u80cc\u666f\u3001\u66f4\u4e25\u82db\u7684\u9ed1\u76d2\u6761\u4ef6\u4ee5\u53ca\u7269\u7406\u4e16\u754c\u7684\u653b\u51fb\u6548\u679c.\u672c\u7b97\u6cd5\u5728\u57fa\u4e8e\u8fc1\u79fb\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u7684\u57fa\u7840\u4e0a\u5f15\u5165\u4e8c\u503c\u5316\u63a9\u7801\u548c\u7a7a\u95f4\u53d8\u6362,\u5728\u4fdd\u8bc1\u653b\u51fb\u6210\u529f\u7387\u7684\u524d\u63d0\u4e0b\u63d0\u5347\u4e86\u5bf9\u6297\u6837\u672c\u7684\u89c6\u89c9\u6548\u679c\u548c\u7269\u7406\u4e16\u754c\u4e2d\u7684\u9c81\u68d2\u6027.\u901a\u8fc7\u63a2\u7d22\u4e0d\u540c\u8303\u6570\u9650\u5236\u4e0b\u57fa\u4e8e\u8fc1\u79fb\u7684\u9ed1\u76d2\u653b\u51fb\u7b97\u6cd5\u7684\u6027\u80fd\u4e0a\u9650\u548c\u5173\u952e\u8d85\u53c2\u6570\u7684\u5f71\u54cd,\u672c\u7b97\u6cd5\u5728\u767e\u5ea6\u8eab\u4efd\u8bc1\u8bc6\u522b\u6a21\u578b\u4e0a\u5b9e\u73b0\u4e86100%\u7684\u653b\u51fb\u6210\u529f\u7387.\u8eab\u4efd\u8bc1\u6570\u636e\u96c6\u540e\u7eed\u5c06\u5f00\u6e90."
+ },
+ {
+ "name": "\u57fa\u4e8e\u667a\u80fd\u8fdb\u5316\u7b97\u6cd5\u7684\u53ef\u89c1\u6c34\u5370\u5bf9\u6297\u653b\u51fb",
+ "authors": [
+ "\u5b63\u4fca\u8c6a1",
+ "\u5f20\u7389\u4e661",
+ "\u8d75\u82e5\u5b871",
+ "\u6e29\u6587\u5a962",
+ "\u8463\u74063"
+ ],
+ "affiliations": [
+ "1. \u5357\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66\u8ba1\u7b97\u673a\u79d1\u5b66\u4e0e\u6280\u672f\u5b66\u9662",
+ "2. \u6c5f\u897f\u8d22\u7ecf\u5927\u5b66\u4fe1\u606f\u7ba1\u7406\u5b66\u9662",
+ "3. \u5b81\u6ce2\u5927\u5b66\u4fe1\u606f\u79d1\u5b66\u4e0e\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u968f\u7740\u516c\u6c11\u7248\u6743\u610f\u8bc6\u7684\u63d0\u9ad8\uff0c\u8d8a\u6765\u8d8a\u591a\u542b\u6709\u6c34\u5370\u7684\u56fe\u50cf\u51fa\u73b0\u5728\u751f\u6d3b\u4e2d\u3002\u7136\u800c\uff0c\u73b0\u6709\u7684\u7814\u7a76\u8868\u660e\uff0c\u542b\u6709\u6c34\u5370\u7684\u56fe\u50cf\u4f1a\u5bfc\u81f4\u795e\u7ecf\u7f51\u7edc\u5206\u7c7b\u9519\u8bef\uff0c\u8fd9\u5bf9\u795e\u7ecf\u7f51\u7edc\u7684\u666e\u53ca\u548c\u5e94\u7528\u6784\u6210\u4e86\u5de8\u5927\u7684\u5a01\u80c1\u3002\u5bf9\u6297\u8bad\u7ec3\u662f\u89e3\u51b3\u8fd9\u7c7b\u95ee\u9898\u7684\u9632\u5fa1\u65b9\u6cd5\u4e4b\u4e00\uff0c\u4f46\u662f\u9700\u8981\u4f7f\u7528\u5927\u91cf\u7684\u6c34\u5370\u5bf9\u6297\u6837\u672c\u4f5c\u4e3a\u8bad\u7ec3\u6570\u636e\u3002\u4e3a\u6b64\uff0c\u63d0\u51fa\u4e86\u4e00\u79cd\u57fa\u4e8e\u667a\u80fd\u8fdb\u5316\u7b97\u6cd5\u7684\u53ef\u89c1\u6c34\u5370\u5bf9\u6297\u653b\u51fb\u65b9\u6cd5\u6765\u751f\u6210\u9ad8\u5f3a\u5ea6\u7684\u6c34\u5370\u5bf9\u6297\u6837\u672c\u3002\u8be5\u65b9\u6cd5\u4e0d\u4ec5\u80fd\u5feb\u901f\u751f\u6210\u6c34\u5370\u5bf9\u6297\u6837\u672c\uff0c\u800c\u4e14\u8fd8\u80fd\u4f7f\u5176\u6700\u5927\u7a0b\u5ea6\u5730\u653b\u51fb\u795e\u7ecf\u7f51\u7edc\u3002\u6b64\u5916\uff0c\u8be5\u65b9\u6cd5\u8fd8\u52a0\u5165\u4e86\u56fe\u50cf\u8d28\u91cf\u8bc4\u4ef7\u6307\u6807\u6765\u7ea6\u675f\u56fe\u50cf\u7684\u89c6\u89c9\u635f\u5931\uff0c\u4ece\u800c\u4f7f\u6c34\u5370\u5bf9\u6297\u6837\u672c\u66f4\u52a0\u7f8e\u89c2\u3002\u5b9e\u9a8c\u7ed3\u679c\u8868\u660e\uff0c\u6240\u63d0\u65b9\u6cd5\u76f8\u6bd4\u4e8e\u57fa\u51c6\u6c34\u5370\u653b\u51fb\u65b9\u6cd5\u65f6\u95f4\u590d\u6742\u5ea6\u66f4\u4f4e\uff0c\u76f8\u6bd4\u4e8e\u57fa\u51c6\u9ed1\u76d2\u653b\u51fb\u5bf9\u795e\u7ecf\u7f51\u7edc\u653b\u51fb\u6210\u529f\u7387\u66f4\u9ad8\u3002"
+ },
+ {
+ "name": "\u57fa\u4e8e\u566a\u58f0\u7834\u574f\u548c\u6ce2\u5f62\u91cd\u5efa\u7684\u58f0\u7eb9\u5bf9\u6297\u6837\u672c\u9632\u5fa1\u65b9\u6cd5",
+ "authors": [
+ "\u9b4f\u6625\u96e81",
+ "\u5b59\u84991",
+ "\u5f20\u96c4\u4f1f1",
+ "\u90b9\u971e1",
+ "\u5370\u67702"
+ ],
+ "affiliations": [
+ "1. \u9646\u519b\u5de5\u7a0b\u5927\u5b66\u6307\u6325\u63a7\u5236\u5de5\u7a0b\u5b66\u9662",
+ "2. \u6c5f\u82cf\u8b66\u5b98\u5b66\u9662"
+ ],
+ "abstract": "\u8bed\u97f3\u662f\u4eba\u7c7b\u6700\u91cd\u8981\u7684\u4ea4\u6d41\u65b9\u5f0f\u4e4b\u4e00\u3002\u8bed\u97f3\u4fe1\u53f7\u4e2d\u9664\u4e86\u6587\u672c\u5185\u5bb9\u5916,\u8fd8\u5305\u542b\u4e86\u8bf4\u8bdd\u4eba\u7684\u8eab\u4efd\u3001\u79cd\u65cf\u3001\u5e74\u9f84\u3001\u6027\u522b\u548c\u60c5\u611f\u7b49\u4e30\u5bcc\u7684\u4fe1\u606f,\u5176\u4e2d\u8bf4\u8bdd\u4eba\u8eab\u4efd\u7684\u8bc6\u522b\u4e5f\u88ab\u79f0\u4e3a\u58f0\u7eb9\u8bc6\u522b,\u662f\u4e00\u79cd\u751f\u7269\u7279\u5f81\u8bc6\u522b\u6280\u672f\u3002\u58f0\u7eb9\u5177\u6709\u83b7\u53d6\u65b9\u4fbf\u3001\u5bb9\u6613\u4fdd\u5b58\u3001\u4f7f\u7528\u7b80\u5355\u7b49\u7279\u70b9,\u800c\u6df1\u5ea6\u5b66\u4e60\u6280\u672f\u7684\u8fdb\u6b65\u4e5f\u6781\u5927\u5730\u4fc3\u8fdb\u4e86\u8bc6\u522b\u51c6\u786e\u7387\u7684\u63d0\u5347,\u56e0\u6b64,\u58f0\u7eb9\u8bc6\u522b\u5df2\u88ab\u5e94\u7528\u4e8e\u667a\u6167\u91d1\u878d\u3001\u667a\u80fd\u5bb6\u5c45\u3001\u8bed\u97f3\u52a9\u624b\u548c\u53f8\u6cd5\u8c03\u67e5\u7b49\u9886\u57df\u3002\u53e6\u4e00\u65b9\u9762,\u9488\u5bf9\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u7684\u5bf9\u6297\u6837\u672c\u653b\u51fb\u53d7\u5230\u4e86\u5e7f\u6cdb\u5173\u6ce8,\u5728\u8f93\u5165\u4fe1\u53f7\u4e2d\u6dfb\u52a0\u4e0d\u53ef\u611f\u77e5\u7684\u5fae\u5c0f\u6270\u52a8\u5373\u53ef\u5bfc\u81f4\u6a21\u578b\u9884\u6d4b\u7ed3\u679c\u9519\u8bef\u3002\u5bf9\u6297\u6837\u672c\u7684\u51fa\u73b0\u5bf9\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u58f0\u7eb9\u8bc6\u522b\u4e5f\u5c06\u9020\u6210\u5de8\u5927\u7684\u5b89\u5168\u5a01\u80c1\u3002\u73b0\u6709\u58f0\u7eb9\u5bf9\u6297\u6837\u672c\u9632\u5fa1\u65b9\u6cd5\u4f1a\u4e0d\u540c\u7a0b\u5ea6\u5730\u5f71\u54cd\u6b63\u5e38\u6837\u672c\u7684\u8bc6\u522b,\u5e76\u4e14\u5c40\u9650\u4e8e\u7279\u5b9a\u7684\u653b\u51fb\u65b9\u6cd5\u6216\u8bc6\u522b\u6a21\u578b,\u9c81\u68d2\u6027\u8f83\u5dee\u3002\u4e3a\u4e86\u4f7f\u5bf9\u6297\u9632\u5fa1\u80fd\u591f\u517c\u987e\u7ea0\u6b63\u9519\u8bef\u8f93\u51fa\u548c\u51c6\u786e\u8bc6\u522b\u6b63\u5e38\u6837\u672c\u4e24\u4e2a\u65b9\u9762,\u672c\u6587\u63d0\u51fa\u4e00\u79cd\u201c\u7834\u574f+\u91cd\u5efa\u201d\u7684\u4e24\u9636\u6bb5\u5bf9\u6297\u6837\u672c\u9632\u5fa1\u65b9\u6cd5\u3002\u7b2c\u4e00\u9636\u6bb5,\u5728\u5bf9\u6297\u6837\u672c\u4e2d\u6dfb\u52a0\u5177\u6709\u4e00\u5b9a\u4fe1\u566a\u6bd4\u5e45\u5ea6\u9650\u5236\u7684\u9ad8\u65af\u767d\u566a\u58f0,\u7834\u574f\u5bf9\u6297\u6270\u52a8\u7684\u7ed3\u6784\u8fdb\u800c\u6d88\u9664\u6837\u672c\u7684\u5bf9\u6297\u6027\u3002\u7b2c\u4e8c\u9636\u6bb5,\u5229\u7528\u63d0\u51fa\u7684\u540d\u4e3aSCAT-Wave-U-Net\u7684\u8bed\u97f3\u589e\u5f3a\u6a21\u578b\u91cd\u5efa\u539f\u59cb\u8bed\u97f3\u6837\u672c,\u901a\u8fc7\u5728Wave-U-Net\u6a21\u578b\u7ed3\u6784\u4e2d\u5f15\u5165Transformer\u5168\u5c40\u591a\u5934\u81ea\u6ce8\u610f\u529b\u548c\u5c42\u95f4\u4ea4\u53c9\u6ce8\u610f\u529b\u673a\u5236,\u4f7f\u6539\u8fdb\u540e\u7684\u6a21\u578b\u66f4\u6709\u52a9\u4e8e\u9632\u5fa1\u58f0\u7eb9\u5bf9\u6297\u6837\u672c\u653b\u51fb\u3002\u5b9e\u9a8c\u8868\u660e,\u63d0\u51fa\u7684\u9632\u5fa1\u65b9\u6cd5\u4e0d\u4f9d\u8d56\u4e8e\u7279\u5b9a\u58f0\u7eb9\u8bc6\u522b\u7cfb\u7edf\u548c\u5bf9\u6297\u6837\u672c\u653b\u51fb\u65b9\u5f0f,\u5728\u4e24\u79cd\u5178\u578b\u7684\u58f0\u7eb9\u8bc6\u522b\u7cfb\u7edf\u4e0b\u5bf9\u591a\u79cd\u7c7b\u578b\u5bf9\u6297\u6837\u672c\u653b\u51fb\u7684\u9632\u5fa1\u6548\u679c\u5747\u4f18\u4e8e\u5176\u4ed6\u9884\u5904\u7406\u9632\u5fa1\u65b9\u6cd5\u3002 "
+ },
+ {
+ "name": "\u57fa\u4e8e\u6df1\u5ea6\u5b66\u4e60\u7684\u81ea\u7136\u8bed\u8a00\u5904\u7406\u653b\u9632\u7814\u7a76\u7efc\u8ff0",
+ "authors": [
+ "\u9a6c\u751c",
+ "\u5f20\u56fd\u6881",
+ "\u90ed\u6653\u519b"
+ ],
+ "affiliations": [
+ "\u897f\u85cf\u6c11\u65cf\u5927\u5b66\u4fe1\u606f\u5de5\u7a0b\u5b66\u9662"
+ ],
+ "abstract": "\u968f\u7740\u4eba\u5de5\u667a\u80fd\u7684\u53d1\u5c55\uff0c\u6df1\u5ea6\u5b66\u4e60\u6280\u672f\u5728\u81ea\u7136\u8bed\u8a00\u5904\u7406\uff08NLP\uff09\u9886\u57df\u5df2\u7ecf\u53d6\u5f97\u4e86\u663e\u8457\u8fdb\u6b65\u3002\u7136\u800c\uff0cNLP\u6a21\u578b\u8fd8\u5b58\u5728\u5b89\u5168\u6027\u6f0f\u6d1e\u3002\u6587\u7ae0\u5206\u6790\u4e86\u6df1\u5ea6\u5b66\u4e60\u5728NLP\u4e09\u5927\u6838\u5fc3\u4efb\u52a1\uff08\u5305\u62ec\u6587\u672c\u8868\u793a\u3001\u8bed\u5e8f\u5efa\u6a21\u548c\u77e5\u8bc6\u8868\u793a\uff09\u4e2d\u7684\u5e94\u7528\u73b0\u72b6\uff0c\u9488\u5bf9\u6587\u672c\u751f\u6210\u3001\u6587\u672c\u5206\u7c7b\u4ee5\u53ca\u8bed\u4e49\u89e3\u6790\u9762\u4e34\u7684\u653b\u51fb\u6280\u672f\uff0c\u63a2\u8ba8\u4e86\u5bf9\u6297\u6027\u8bad\u7ec3\u3001\u6b63\u5219\u5316\u6280\u672f\u3001\u6a21\u578b\u84b8\u998f\u7b49\u4e00\u7cfb\u5217\u9632\u5fa1\u6280\u672f\u5728\u5b9e\u9645\u5e94\u7528\u4e2d\u7684\u6548\u7528\u548c\u5c40\u9650\uff0c\u5e76\u901a\u8fc7\u6587\u672c\u5206\u7c7b\u4efb\u52a1\u7684\u5b9e\u8bc1\u7814\u7a76\u9a8c\u8bc1\u4e86\u96c6\u6210\u5bf9\u6297\u8bad\u7ec3\u7684\u6709\u6548\u6027\u3002"
+ },
+ {
+ "name": "\u4e00\u79cd\u57fa\u4e8e\u8f6e\u5ed3\u7a00\u758f\u5bf9\u6297\u7684\u89c6\u9891\u6b65\u6001\u9690\u79c1\u4fdd\u62a4\u7b97\u6cd5",
+ "authors": [
+ "\u8bb8\u53ef",
+ "\u674e\u5609\u6021",
+ "\u848b\u5174\u6d69",
+ "\u5b59\u952c\u950b"
+ ],
+ "affiliations": [
+ "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\u7f51\u7edc\u7a7a\u95f4\u5b89\u5168\u5b66\u9662"
+ ],
+ "abstract": "\u6df1\u5ea6\u7f51\u7edc\u6a21\u578b\u53ef\u4ee5\u4ece\u89c6\u9891\u6b65\u6001\u5e8f\u5217\u4e2d\u83b7\u53d6\u4eba\u4f53\u6b65\u6001\u751f\u7269\u7279\u5f81\u5e76\u8bc6\u522b\u4eba\u7269\u8eab\u4efd,\u9020\u6210\u4e25\u91cd\u7684\u9690\u79c1\u6cc4\u9732\u5b89\u5168\u5a01\u80c1\u3002\u73b0\u6709\u65b9\u6cd5\u4e00\u822c\u901a\u8fc7\u5bf9\u89c6\u9891\u753b\u9762\u4e2d\u7684\u4eba\u4f53\u8fdb\u884c\u6a21\u7cca\u3001\u53d8\u5f62\u7b49\u5904\u7406\u6765\u4fdd\u62a4\u9690\u79c1,\u8fd9\u4e9b\u65b9\u6cd5\u53ef\u4ee5\u5728\u4e00\u5b9a\u7a0b\u5ea6\u4e0a\u6539\u53d8\u4eba\u4f53\u5916\u89c2,\u4f46\u5f88\u96be\u6539\u53d8\u4eba\u7269\u884c\u8d70\u59ff\u6001,\u96be\u4ee5\u9003\u907f\u6df1\u5ea6\u7f51\u7edc\u6a21\u578b\u7684\u8bc6\u522b,\u4e14\u8fd9\u79cd\u5904\u7406\u5f80\u5f80\u4f34\u968f\u7740\u5bf9\u89c6\u9891\u8d28\u91cf\u7684\u4e25\u91cd\u7834\u574f,\u964d\u4f4e\u4e86\u89c6\u9891\u7684\u89c6\u89c9\u53ef\u7528\u6027\u3002\u9488\u5bf9\u8be5\u95ee\u9898,\u6587\u7ae0\u63d0\u51fa\u4e00\u79cd\u57fa\u4e8e\u8f6e\u5ed3\u7a00\u758f\u5bf9\u6297\u7684\u89c6\u9891\u6b65\u6001\u9690\u79c1\u4fdd\u62a4\u7b97\u6cd5,\u901a\u8fc7\u5bf9\u6b65\u6001\u8bc6\u522b\u6a21\u578b\u7684\u5bf9\u6297\u653b\u51fb\u6765\u8ba1\u7b97\u753b\u9762\u4e2d\u4eba\u4f53\u8f6e\u5ed3\u5468\u56f4\u7684\u6709\u6548\u4fee\u6539\u4f4d\u7f6e\u3002\u4e0e\u4f20\u7edf\u65b9\u6cd5\u76f8\u6bd4,\u5728\u5177\u6709\u76f8\u540c\u9690\u79c1\u4fdd\u62a4\u80fd\u529b\u7684\u60c5\u51b5\u4e0b,\u8be5\u7b97\u6cd5\u51cf\u5c11\u4e86\u5bf9\u753b\u9762\u7684\u4fee\u6539,\u5728\u9690\u79c1\u5b89\u5168\u6027\u548c\u89c6\u89c9\u53ef\u7528\u6027\u4e0a\u8fbe\u5230\u4e86\u8f83\u597d\u7684\u5747\u8861\u3002\u8be5\u7b97\u6cd5\u5728\u516c\u5f00\u6b65\u6001\u6570\u636e\u5e93CASIA-B\u548cOUMVLP\u4e0a\u5bf94\u79cd\u6b65\u6001\u8bc6\u522b\u6a21\u578b\u8fdb\u884c\u6d4b\u8bd5,\u901a\u8fc7\u4e0e\u4e0d\u540c\u6b65\u6001\u9690\u79c1\u4fdd\u62a4\u65b9\u6cd5\u5bf9\u6bd4,\u9a8c\u8bc1\u4e86\u8be5\u7b97\u6cd5\u5728\u6b65\u6001\u9690\u79c1\u4fdd\u62a4\u4e0a\u7684\u6709\u6548\u6027\u548c\u53ef\u7528\u6027\u3002"
+ }
+]
\ No newline at end of file
diff --git a/result_arxiv_knowledge_graph.json b/result_arxiv_knowledge_graph.json
new file mode 100644
index 0000000..7f5055f
--- /dev/null
+++ b/result_arxiv_knowledge_graph.json
@@ -0,0 +1 @@
+[{"name": "Solving Power Grid Optimization Problems with Rydberg Atoms", "authors": "Nora Bauer,K\u00fcbra Yeter-Aydeniz,Elias Kokkas,George Siopsis", "affiliations": "no", "abstract": "The rapid development of neutral atom quantum hardware provides a unique opportunity to design hardware-centered algorithms for solving real-world problems aimed at establishing quantum utility. In this work, we study the performance of two such algorithms on solving MaxCut problem for various weighted graphs. The first method uses a state-of-the-art machine learning tool to optimize the pulse shape and embedding of the graph using an adiabatic Ansatz to find the ground state. We tested the performance of this method on finding maximum power section task of the IEEE 9-bus power system and obtaining MaxCut of randomly generated problems of size up to 12 on the Aquila quantum processor. To the best of our knowledge, this work presents the first MaxCut results on Quera's Aquila quantum hardware. Our experiments run on Aquila demonstrate that even though the probability of obtaining the solution is reduced, one can still solve the MaxCut problem on cloud-accessed neutral atom quantum hardware. The second method uses local detuning, which is an emergent update on the Aquila hardware, to obtain a near exact realization of the standard QAOA Ansatz with similar performance. Finally, we study the fidelity throughout the time evolution realized in the adiabatic method as a benchmark for the IEEE 9-bus power grid graph state."}, {"name": "Towards Human Awareness in Robot Task Planning with Large Language Models", "authors": "Yuchen Liu,Luigi Palmieri,Sebastian Koch,Ilche Georgievski,Marco Aiello", "affiliations": "no", "abstract": "The recent breakthroughs in the research on Large Language Models (LLMs) have triggered a transformation across several research domains. Notably, the integration of LLMs has greatly enhanced performance in robot Task And Motion Planning (TAMP). However, previous approaches often neglect the consideration of dynamic environments, i.e., the presence of dynamic objects such as humans. In this paper, we propose a novel approach to address this gap by incorporating human awareness into LLM-based robot task planning. To obtain an effective representation of the dynamic environment, our approach integrates humans' information into a hierarchical scene graph. To ensure the plan's executability, we leverage LLMs to ground the environmental topology and actionable knowledge into formal planning language. Most importantly, we use LLMs to predict future human activities and plan tasks for the robot considering the predictions. Our contribution facilitates the development of integrating human awareness into LLM-driven robot task planning, and paves the way for proactive robot decision-making in dynamic environments."}, {"name": "EEG_GLT-Net: Optimising EEG Graphs for Real-time Motor Imagery Signals Classification", "authors": "Htoo Wai Aung,Jiao Jiao Li,Yang An,Steven W. Su", "affiliations": "no", "abstract": "Brain-Computer Interfaces connect the brain to external control devices, necessitating the accurate translation of brain signals such as from electroencephalography (EEG) into executable commands. Graph Neural Networks (GCN) have been increasingly applied for classifying EEG Motor Imagery signals, primarily because they incorporates the spatial relationships among EEG channels, resulting in improved accuracy over traditional convolutional methods. 
Recent advances by GCNs-Net in real-time EEG MI signal classification utilised Pearson Coefficient Correlation (PCC) for constructing adjacency matrices, yielding significant results on the PhysioNet dataset. Our paper introduces the EEG Graph Lottery Ticket (EEG_GLT) algorithm, an innovative technique for constructing adjacency matrices for EEG channels. It does not require pre-existing knowledge of inter-channel relationships, and it can be tailored to suit both individual subjects and GCN model architectures. Our findings demonstrated that the PCC method outperformed the Geodesic approach by 9.65% in mean accuracy, while our EEG_GLT matrix consistently exceeded the performance of the PCC method by a mean accuracy of 13.39%. Also, we found that the construction of the adjacency matrix significantly influenced accuracy, to a greater extent than GCN model configurations. A basic GCN configuration utilising our EEG_GLT matrix exceeded the performance of even the most complex GCN setup with a PCC matrix in average accuracy. Our EEG_GLT method also reduced MACs by up to 97% compared to the PCC method, while maintaining or enhancing accuracy. In conclusion, the EEG_GLT algorithm marks a breakthrough in the development of optimal adjacency matrices, effectively boosting both computational accuracy and efficiency, making it well-suited for real-time classification of EEG MI signals that demand intensive computational resources."}, {"name": "Graph Continual Learning with Debiased Lossless Memory Replay", "authors": "Chaoxi Niu,Guansong Pang,Ling Chen", "affiliations": "no", "abstract": "Real-life graph data often expands continually, rendering the learning of graph neural networks (GNNs) on static graph data impractical. Graph continual learning (GCL) tackles this problem by continually adapting GNNs to the expanded graph of the current task while maintaining the performance over the graph of previous tasks. Memory replay-based methods, which aim to replay data of previous tasks when learning new tasks, have been explored as one principled approach to mitigate the forgetting of the knowledge learned from the previous tasks. In this paper we extend this methodology with a novel framework, called Debiased Lossless Memory replay (DeLoMe). Unlike existing methods that sample nodes/edges of previous graphs to construct the memory, DeLoMe learns small lossless synthetic node representations as the memory. The learned memory can not only preserve the graph data privacy but also capture the holistic graph information, for which the sampling-based methods are not viable. Further, prior methods suffer from bias toward the current task due to the data imbalance between the classes in the memory data and the current data. A debiased GCL loss function is devised in DeLoMe to effectively alleviate this bias. Extensive experiments on four graph datasets show the effectiveness of DeLoMe under both class- and task-incremental learning settings."}, {"name": "Neuromorphic Vision-based Motion Segmentation with Graph Transformer Neural Network", "authors": "Yusra Alkendi,Rana Azzam,Sajid Javed,Lakmal Seneviratne,Yahya Zweiri", "affiliations": "no", "abstract": "Moving object segmentation is critical to interpret scene dynamics for robotic navigation systems in challenging environments. Neuromorphic vision sensors are tailored for motion perception due to their asynchronous nature, high temporal resolution, and reduced power consumption. 
However, their unconventional output requires novel perception paradigms to leverage their spatially sparse and temporally dense nature. In this work, we propose a novel event-based motion segmentation algorithm using a Graph Transformer Neural Network, dubbed GTNN. Our proposed algorithm processes event streams as 3D graphs by a series of nonlinear transformations to unveil local and global spatiotemporal correlations between events. Based on these correlations, events belonging to moving objects are segmented from the background without prior knowledge of the dynamic scene geometry. The algorithm is trained on publicly available datasets including MOD, EV-IMO, and \\textcolor{black}{EV-IMO2} using the proposed training scheme to facilitate efficient training on extensive datasets. Moreover, we introduce the Dynamic Object Mask-aware Event Labeling (DOMEL) approach for generating approximate ground-truth labels for event-based motion segmentation datasets. We use DOMEL to label our own recorded Event dataset for Motion Segmentation (EMS-DOMEL), which we release to the public for further research and benchmarking. Rigorous experiments are conducted on several unseen publicly-available datasets where the results revealed that GTNN outperforms state-of-the-art methods in the presence of dynamic background variations, motion patterns, and multiple dynamic objects with varying sizes and velocities. GTNN achieves significant performance gains with an average increase of 9.4% and 4.5% in terms of motion segmentation accuracy (IoU%) and detection rate (DR%), respectively."}, {"name": "Classical and Quantum Distributed Algorithms for the Survivable Network Design Problem", "authors": "Phillip Kerger,David E. Bernal Neira,Zoe Gonzalez Izquierdo,Eleanor G. Rieffel", "affiliations": "no", "abstract": "We investigate distributed classical and quantum approaches for the survivable network design problem (SNDP), sometimes called the generalized Steiner problem. These problems generalize many complex graph problems of interest, such as the traveling salesperson problem, the Steiner tree problem, and the k-connected network problem. To our knowledge, no classical or quantum algorithms for the SNDP have been formulated in the distributed settings we consider. We describe algorithms that are heuristics for the general problem but give concrete approximation bounds under specific parameterizations of the SNDP, which in particular hold for the three aforementioned problems that SNDP generalizes. We use a classical, centralized algorithmic framework first studied in (Goemans & Bertsimas 1993) and provide a distributed implementation thereof. Notably, we obtain asymptotic quantum speedups by leveraging quantum shortest path computations in this framework, generalizing recent work of (Kerger et al. 2023). These results raise the question of whether there is a separation between the classical and quantum models for application-scale instances of the problems considered."}]
\ No newline at end of file
diff --git a/t1.py b/t1.py
new file mode 100644
index 0000000..71cab14
--- /dev/null
+++ b/t1.py
@@ -0,0 +1,14 @@
+from serpapi import GoogleSearch
+# Minimal SerpAPI example: query Google for "Coffee" and print the raw JSON response.
+params = {
+ "q": "Coffee",
+ "location": "Austin, Texas, United States",
+ "hl": "en",
+ "gl": "us",
+ "google_domain": "google.com",
+ "api_key": "681ac1d6fe9958124d39f25ea5afd759b63f45e52cac7e85629655024661166e"
+}
+
+search = GoogleSearch(params)
+results = search.get_dict()
+print(results)
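+
+# A minimal follow-up sketch (not part of the original script above): the response is a plain
+# dict, so the usual search hits can be pulled straight out of it. This assumes the payload
+# carries an "organic_results" list whose entries have "title" and "link" keys, which is the
+# typical shape of a SerpAPI Google search response; adjust the keys if the payload differs.
+for hit in results.get("organic_results", []):
+    print(hit.get("title"), "->", hit.get("link"))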
diff --git a/te_u/arxiv.py b/te_u/arxiv.py
new file mode 100644
index 0000000..4161211
--- /dev/null
+++ b/te_u/arxiv.py
@@ -0,0 +1,150 @@
+import undetected_chromedriver as uc
+import time
+import random
+import json
+import matplotlib.pyplot as plt  # data visualization
+import jieba  # Chinese word segmentation
+import wordcloud  # word cloud package (WordCloud etc. imported explicitly below)
+from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS  # word cloud, color generator, stop words
+import numpy as np  # scientific computing
+from PIL import Image  # image handling
+from bs4 import BeautifulSoup
+from lxml import etree
+
+
+# def get_current_page_result(driver):
+# """ 采集一页里的所有item """
+# result_area = driver.find_element(by="id", value="ModuleSearchResult")
+# current_page_results = result_area.find_elements(by="xpath", value='//tbody/tr')
+#
+# names = [r.find_element(by="xpath", value='td[@class="name"]') for r in current_page_results]
+# links = [r.find_element(by="xpath", value='td[@class="name"]/a').get_attribute("href") for r in current_page_results]
+#
+# items = get_items(driver, links)
+# return items
+
+
+def get_items(driver, links):
+ items = []
+ for i, l in enumerate(links):
+ item = get_item(driver, l)
+ items.append(item)
+ return items
+
+
+def get_item(driver, link):
+ item = {}
+    driver.get(link)  # open the paper's detail page
+    time.sleep(5 + 3 * random.random())  # wait for the page to finish loading
+
+    # title
+ name = driver.find_element(by="xpath", value='//h1[contains(@class, "title")]').text
+ item["name"] = name
+
+    # authors
+ names_element = driver.find_elements(by="xpath", value='//div[@class="authors"]//a')
+ names = [n_ele.text for n_ele in names_element]
+ item["authors"] = ",".join(names)
+
+    # affiliation (not scraped here; placeholder value)
+ item["affiliations"] = "no"
+
+    # abstract
+    # if a "show more" button exists, click it first (handled by the commented-out block below)
+ # try:
+ # more_bn = driver.find_element(by="id", value="ChDivSummaryMore")
+ # more_bn.click()
+    #     time.sleep(1 + 1 * random.random())  # wait for loading to finish
+ # except:
+ # more_bn = None
+
+ abstract_area = driver.find_element(by="xpath", value='//blockquote[contains(@class, "abstract")]')
+ abstract = abstract_area.text
+ item["abstract"] = abstract
+
+ return item
+
+
+def get_links_etree(driver):
+ dom = etree.HTML(driver.page_source)
+ links = dom.xpath('//ol[@class="breathe-horizontal"]/li/div/p/a/@href')
+ return links
+
+
+def get_news_from_arxiv(total_num, keyword):
+ keyword = [i.strip() for i in keyword.strip().split()]
+ url = f"https://arxiv.org/search/?query={'+'.join(keyword)}&searchtype=all&source=header"
+ driver = uc.Chrome()
+ driver.get(url)
+    # time.sleep(3 + 2 * random.random())  # wait for the page to finish loading
+    # # search (disabled search-box flow)
+ # input_button = driver.find_element(by="id", value="txt_SearchText")
+ # input_button.send_keys(keyword)
+    # time.sleep(1 + 1 * random.random())  # wait for the page to finish loading
+ #
+ # search_bn = driver.find_element(by="xpath", value='//input[@class="search-btn"]')
+ # search_bn.click()
+    time.sleep(5 + 3 * random.random())  # wait for the results page to finish loading
+
+    # collect the matching result links
+ links = []
+ stop_flag = False
+
+ while not stop_flag:
+ link_current_page = get_links_etree(driver)
+ links.extend(link_current_page)
+
+ if len(links) < total_num:
+            # go to the next page
+ try:
+ next_page_btn = driver.find_element(by="xpath", value='//a[@class="pagination-next"]')
+ next_page_btn.click()
+                time.sleep(2 + 2 * random.random())  # wait for the next page to load
+ # driver.refresh()
+                # time.sleep(2 + 2 * random.random())  # wait for the page to load
+ except Exception as e:
+ print("没有下一页,返回当前的采集的所有结果", e)
+ stop_flag = True
+ total_num = len(links)
+ else:
+            # stop once enough links have been collected
+ stop_flag = True
+
+ links = links[:total_num]
+
+ results = get_items(driver, links)
+
+ with open(f"result_arxiv_{'_'.join(keyword)}.json", "w", encoding="utf8") as f:
+ f.write(json.dumps(results))
+
+ driver.close()
+ return results
+
+
+def get_clouds(word_list):
+ text = ",".join(word_list)
+    wordlist = jieba.lcut(text)  # segment the text into words
+    space_list = ' '.join(wordlist)  # join the words with spaces for WordCloud
+ # backgroud = np.array(Image.open('test1.jpg'))
+
+    wc = WordCloud(width=400, height=300,
+                   background_color='white',
+                   mode='RGB',
+                   # mask=backgroud,  # add a mask image to shape the cloud; its colors can also be taken from the mask
+                   max_words=200,
+                   stopwords=STOPWORDS | {'in', 'of', 'for'},  # built-in stop words plus a few custom ones
+                   font_path=r'C:\Windows\Fonts\STZHONGS.ttf',  # a Chinese-capable font (Windows path)
+                   max_font_size=100,
+                   relative_scaling=0.6,  # how strongly font size follows word frequency
+                   random_state=50,
+                   scale=2
+                   ).generate(space_list)
+
+    # image_color = ImageColorGenerator(backgroud)  # take the cloud colors from the mask image; remove these two lines to keep the default colors
+ # wc.recolor(color_func=image_color)
+
+ return wc.to_array()
+
+
+if __name__ == '__main__':
+ get_news_from_arxiv(5, "knowledge graph")
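+
+    # Illustrative sketch (not part of the original script): get_clouds accepts any list of
+    # strings, so a quick word cloud can be rendered here without re-running the scraper; the
+    # phrases below are made-up demo inputs.
+    demo_cloud = get_clouds(["knowledge graph embedding", "graph neural network", "entity linking"])
+    plt.imshow(demo_cloud)
+    plt.axis("off")
+    plt.show()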
diff --git a/te_u/paper_down_load/csv/ECCV_2022.csv b/te_u/paper_down_load/csv/ECCV_2022.csv
new file mode 100644
index 0000000..5b95b71
--- /dev/null
+++ b/te_u/paper_down_load/csv/ECCV_2022.csv
@@ -0,0 +1,1646 @@
+title,main link,supplemental link
+learning-depth-from-focus-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610001-supp.pdf
+learning-based-point-cloud-registration-for-6d-object-pose-estimation-in-the-real-world,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610018.pdf,
+an-end-to-end-transformer-model-for-crowd-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610037-supp.pdf
+few-shot-single-view-3d-reconstruction-with-memory-prior-contrastive-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610054.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610054-supp.pdf
+did-m3d-decoupling-instance-depth-for-monocular-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610071.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610071-supp.pdf
+adaptive-co-teaching-for-unsupervised-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610089-supp.pdf
+fusing-local-similarities-for-retrieval-based-3d-orientation-estimation-of-unseen-objects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610106.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610106-supp.pdf
+lidar-point-cloud-guided-monocular-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610123.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610123-supp.pdf
+structural-causal-3d-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610140.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610140-supp.pdf
+3d-human-pose-estimation-using-mobius-graph-convolutional-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610158.pdf,
+learning-to-train-a-point-cloud-reconstruction-network-without-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610177-supp.pdf
+panoformer-panorama-transformer-for-indoor-360deg-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610193.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610193-supp.pdf
+self-supervised-human-mesh-recovery-with-cross-representation-alignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610210.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610210-supp.pdf
+alignsdf-pose-aligned-signed-distance-fields-for-hand-object-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610229-supp.zip
+a-reliable-online-method-for-joint-estimation-of-focal-length-and-camera-rotation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610247.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610247-supp.pdf
+ps-nerf-neural-inverse-rendering-for-multi-view-photometric-stereo,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610263.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610263-supp.pdf
+share-with-thy-neighbors-single-view-reconstruction-by-cross-instance-consistency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610282.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610282-supp.pdf
+towards-comprehensive-representation-enhancement-in-semantics-guided-self-supervised-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610299.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610299-supp.zip
+avatarcap-animatable-avatar-conditioned-monocular-human-volumetric-capture,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610317.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610317-supp.pdf
+cross-attention-of-disentangled-modalities-for-3d-human-mesh-recovery-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610336.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610336-supp.pdf
+georefine-self-supervised-online-depth-refinement-for-accurate-dense-mapping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610354.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610354-supp.pdf
+multi-modal-masked-pre-training-for-monocular-panoramic-depth-completion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610372.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610372-supp.pdf
+gitnet-geometric-prior-based-transformation-for-birds-eye-view-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610390.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610390-supp.pdf
+learning-visibility-for-robust-dense-human-body-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610406.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610406-supp.pdf
+towards-high-fidelity-single-view-holistic-reconstruction-of-indoor-scenes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610423-supp.pdf
+compnvs-novel-view-synthesis-with-scene-completion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610441.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610441-supp.pdf
+sketchsampler-sketch-based-3d-reconstruction-via-view-dependent-depth-sampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610457.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610457-supp.pdf
+localbins-improving-depth-estimation-by-learning-local-distributions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610473.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610473-supp.pdf
+2d-gans-meet-unsupervised-single-view-3d-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610490.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610490-supp.pdf
+infinitenature-zero-learning-perpetual-view-generation-of-natural-scenes-from-single-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610508.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610508-supp.pdf
+semi-supervised-single-view-3d-reconstruction-via-prototype-shape-priors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610528.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610528-supp.pdf
+bilateral-normal-integration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610545.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610545-supp.pdf
+s2contact-graph-based-network-for-3d-hand-object-contact-estimation-with-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610561.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610561-supp.pdf
+sc-wls-towards-interpretable-feed-forward-camera-re-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610578.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610578-supp.pdf
+floatingfusion-depth-from-tof-and-image-stabilized-stereo-cameras,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610595.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610595-supp.pdf
+deltar-depth-estimation-from-a-light-weight-tof-sensor-and-rgb-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610612.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610612-supp.zip
+3d-room-layout-estimation-from-a-cubemap-of-panorama-image-via-deep-manhattan-hough-transform,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610630.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610630-supp.pdf
+rbp-pose-residual-bounding-box-projection-for-category-level-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610647.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610647-supp.pdf
+monocular-3d-object-reconstruction-with-gan-inversion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610665.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610665-supp.pdf
+map-free-visual-relocalization-metric-pose-relative-to-a-single-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610682.pdf,
+self-distilled-feature-aggregation-for-self-supervised-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610700.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610700-supp.pdf
+planes-vs-chairs-category-guided-3d-shape-learning-without-any-3d-cues,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610717.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136610717-supp.pdf
+mhr-net-multiple-hypothesis-reconstruction-of-non-rigid-shapes-from-2d-views,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620001-supp.pdf
+depth-map-decomposition-for-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620018-supp.pdf
+monitored-distillation-for-positive-congruent-depth-completion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620035.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620035-supp.pdf
+resolution-free-point-cloud-sampling-network-with-data-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620053.pdf,
+organic-priors-in-non-rigid-structure-from-motion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620069.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620069-supp.pdf
+perspective-flow-aggregation-for-data-limited-6d-object-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620087.pdf,
+danbo-disentangled-articulated-neural-body-representations-via-graph-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620104.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620104-supp.pdf
+chore-contact-human-and-object-reconstruction-from-a-single-rgb-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620121.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620121-supp.pdf
+learned-vertex-descent-a-new-direction-for-3d-human-model-fitting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620141-supp.pdf
+self-calibrating-photometric-stereo-by-neural-inverse-rendering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620160.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620160-supp.pdf
+3d-clothed-human-reconstruction-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620177-supp.pdf
+directed-ray-distance-functions-for-3d-scene-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620193.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620193-supp.pdf
+object-level-depth-reconstruction-for-category-level-6d-object-pose-estimation-from-monocular-rgb-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620212.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620212-supp.pdf
+uncertainty-quantification-in-depth-estimation-via-constrained-ordinal-regression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620229-supp.pdf
+costdcnet-cost-volume-based-depth-completion-for-a-single-rgb-d-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620248.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620248-supp.pdf
+shapo-implicit-representations-for-multi-object-shape-appearance-and-pose-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620266.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620266-supp.zip
+3d-siamese-transformer-network-for-single-object-tracking-on-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620284.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620284-supp.pdf
+object-wake-up-3d-object-rigging-from-a-single-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620302.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620302-supp.pdf
+integratedpifu-integrated-pixel-aligned-implicit-function-for-single-view-human-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620319.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620319-supp.pdf
+realistic-one-shot-mesh-based-head-avatars,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620336.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620336-supp.pdf
+a-kendall-shape-space-approach-to-3d-shape-estimation-from-2d-landmarks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620354.pdf,
+neural-light-field-estimation-for-street-scenes-with-differentiable-virtual-object-insertion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620370.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620370-supp.pdf
+perspective-phase-angle-model-for-polarimetric-3d-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620387.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620387-supp.zip
+deepshadow-neural-shape-from-shadow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620403.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620403-supp.pdf
+camera-auto-calibration-from-the-steiner-conic-of-the-fundamental-matrix,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620419.pdf,
+super-resolution-3d-human-shape-from-a-single-low-resolution-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620435.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620435-supp.pdf
+minimal-neural-atlas-parameterizing-complex-surfaces-with-minimal-charts-and-distortion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620452.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620452-supp.pdf
+extrudenet-unsupervised-inverse-sketch-and-extrude-for-shape-parsing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620468.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620468-supp.pdf
+catre-iterative-point-clouds-alignment-for-category-level-object-pose-refinement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620485.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620485-supp.pdf
+optimization-over-disentangled-encoding-unsupervised-cross-domain-point-cloud-completion-via-occlusion-factor-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620504.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620504-supp.zip
+unsupervised-learning-of-3d-semantic-keypoints-with-mutual-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620521.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620521-supp.pdf
+mvdecor-multi-view-dense-correspondence-learning-for-fine-grained-3d-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620538.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620538-supp.pdf
+supr-a-sparse-unified-part-based-human-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620555.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620555-supp.pdf
+revisiting-point-cloud-simplification-a-learnable-feature-preserving-approach,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620573.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620573-supp.pdf
+masked-autoencoders-for-point-cloud-self-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620591.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620591-supp.pdf
+intrinsic-neural-fields-learning-functions-on-manifolds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620609.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620609-supp.zip
+skeleton-free-pose-transfer-for-stylized-3d-characters,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620627.pdf,
+masked-discrimination-for-self-supervised-learning-on-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620645.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620645-supp.pdf
+fbnet-feedback-network-for-point-cloud-completion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620664.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620664-supp.pdf
+meta-sampler-almost-universal-yet-task-oriented-sampling-for-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620682.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620682-supp.pdf
+a-level-set-theory-for-neural-implicit-evolution-under-explicit-flows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620699.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620699-supp.pdf
+efficient-point-cloud-analysis-using-hilbert-curve,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620717.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136620717-supp.pdf
+toch-spatio-temporal-object-to-hand-correspondence-for-motion-refinement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630001-supp.zip
+laterf-label-and-text-driven-object-radiance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630021.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630021-supp.pdf
+meshmae-masked-autoencoders-for-3d-mesh-data-analysis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630038.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630038-supp.pdf
+unsupervised-deep-multi-shape-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630056.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630056-supp.pdf
+texturify-generating-textures-on-3d-shape-surfaces,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630073.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630073-supp.zip
+autoregressive-3d-shape-generation-via-canonical-mapping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630091.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630091-supp.pdf
+pointtree-transformation-robust-point-cloud-encoder-with-relaxed-k-d-trees,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630107.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630107-supp.pdf
+unif-united-neural-implicit-functions-for-clothed-human-reconstruction-and-animation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630123.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630123-supp.pdf
+prif-primary-ray-based-implicit-function,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630140.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630140-supp.pdf
+point-cloud-domain-adaptation-via-masked-local-3d-structure-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630159.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630159-supp.pdf
+clip-actor-text-driven-recommendation-and-stylization-for-animating-human-meshes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630176-supp.pdf
+planeformers-from-sparse-view-planes-to-3d-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630194.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630194-supp.pdf
+learning-implicit-templates-for-point-based-clothed-human-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630211.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630211-supp.zip
+exploring-the-devil-in-graph-spectral-domain-for-3d-point-cloud-attacks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630230.pdf,
+structure-aware-editable-morphable-model-for-3d-facial-detail-animation-and-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630248.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630248-supp.zip
+mofanerf-morphable-facial-neural-radiance-field,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630267.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630267-supp.zip
+pointinst3d-segmenting-3d-instances-by-points,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630284.pdf,
+cross-modal-3d-shape-generation-and-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630300.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630300-supp.pdf
+latent-partition-implicit-with-surface-codes-for-3d-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630318.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630318-supp.pdf
+implicit-field-supervision-for-robust-non-rigid-shape-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630338.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630338-supp.pdf
+learning-self-prior-for-mesh-denoising-using-dual-graph-convolutional-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630358.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630358-supp.pdf
+diffconv-analyzing-irregular-point-clouds-with-an-irregular-view,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630375.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630375-supp.zip
+pd-flow-a-point-cloud-denoising-framework-with-normalizing-flows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630392.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630392-supp.pdf
+seedformer-patch-seeds-based-point-cloud-completion-with-upsample-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630409.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630409-supp.pdf
+deepmend-learning-occupancy-functions-to-represent-shape-for-repair,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630426.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630426-supp.pdf
+a-repulsive-force-unit-for-garment-collision-handling-in-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630444.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630444-supp.pdf
+shape-pose-disentanglement-using-se-3-equivariant-vector-neurons,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630461.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630461-supp.zip
+3d-equivariant-graph-implicit-functions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630477.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630477-supp.pdf
+patchrd-detail-preserving-shape-completion-by-learning-patch-retrieval-and-deformation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630494.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630494-supp.pdf
+3d-shape-sequence-of-human-comparison-and-classification-using-current-and-varifolds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630514.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630514-supp.zip
+conditional-flow-nerf-accurate-3d-modelling-with-reliable-uncertainty-quantification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630531.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630531-supp.zip
+unsupervised-pose-aware-part-decomposition-for-man-made-articulated-objects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630549.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630549-supp.pdf
+meshudf-fast-and-differentiable-meshing-of-unsigned-distance-field-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630566.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630566-supp.pdf
+spe-net-boosting-point-cloud-analysis-via-rotation-robustness-enhancement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630582.pdf,
+the-shape-part-slot-machine-contact-based-reasoning-for-generating-3d-shapes-from-parts,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630599.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630599-supp.pdf
+spatiotemporal-self-attention-modeling-with-temporal-patch-shift-for-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630615.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630615-supp.pdf
+proposal-free-temporal-action-detection-via-global-segmentation-mask-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630632.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630632-supp.pdf
+semi-supervised-temporal-action-detection-with-proposal-free-masking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630649.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630649-supp.pdf
+zero-shot-temporal-action-detection-via-vision-language-prompting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630667.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630667-supp.pdf
+cycda-unsupervised-cycle-domain-adaptation-to-learn-from-image-to-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630684.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630684-supp.pdf
+s2n-suppression-strengthen-network-for-event-based-recognition-under-variant-illuminations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630701.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630701-supp.pdf
+cmd-self-supervised-3d-action-representation-learning-with-cross-modal-mutual-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630719.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136630719-supp.pdf
+expanding-language-image-pretrained-models-for-general-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640001-supp.pdf
+hunting-group-clues-with-transformers-for-social-group-activity-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640018-supp.pdf
+contrastive-positive-mining-for-unsupervised-3d-action-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640035.pdf,
+target-absent-human-attention,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640051.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640051-supp.pdf
+uncertainty-based-spatial-temporal-attention-for-online-action-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640068.pdf,
+iwin-human-object-interaction-detection-via-transformer-with-irregular-windows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640085.pdf,
+rethinking-zero-shot-action-recognition-learning-from-latent-atomic-actions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640102.pdf,
+mining-cross-person-cues-for-body-part-interactiveness-learning-in-hoi-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640119.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640119-supp.pdf
+collaborating-domain-shared-and-target-specific-feature-clustering-for-cross-domain-3d-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640135.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640135-supp.pdf
+is-appearance-free-action-recognition-possible,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640154.pdf,
+learning-spatial-preserved-skeleton-representations-for-few-shot-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640172.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640172-supp.pdf
+dual-evidential-learning-for-weakly-supervised-temporal-action-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640190.pdf,
+global-local-motion-transformer-for-unsupervised-skeleton-based-action-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640207.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640207-supp.pdf
+adafocusv3-on-unified-spatial-temporal-dynamic-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640224.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640224-supp.pdf
+panoramic-human-activity-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640242.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640242-supp.pdf
+delving-into-details-synopsis-to-detail-networks-for-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640259.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640259-supp.pdf
+a-generalized-robust-framework-for-timestamp-supervision-in-temporal-action-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640276.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640276-supp.pdf
+few-shot-action-recognition-with-hierarchical-matching-and-contrastive-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640293.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640293-supp.pdf
+privhar-recognizing-human-actions-from-privacy-preserving-lens,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640310.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640310-supp.zip
+scale-aware-spatio-temporal-relation-learning-for-video-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640328.pdf,
+compound-prototype-matching-for-few-shot-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640346.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640346-supp.pdf
+continual-3d-convolutional-neural-networks-for-real-time-processing-of-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640364.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640364-supp.pdf
+dynamic-spatio-temporal-specialization-learning-for-fine-grained-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640381.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640381-supp.pdf
+dynamic-local-aggregation-network-with-adaptive-clusterer-for-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640398.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640398-supp.pdf
+action-quality-assessment-with-temporal-parsing-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640416.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640416-supp.pdf
+entry-flipped-transformer-for-inference-and-prediction-of-participant-behavior,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640433.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640433-supp.zip
+pairwise-contrastive-learning-network-for-action-quality-assessment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640450.pdf,
+geometric-features-informed-multi-person-human-object-interaction-recognition-in-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640467.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640467-supp.pdf
+actionformer-localizing-moments-of-actions-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640485.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640485-supp.pdf
+socialvae-human-trajectory-prediction-using-timewise-latents,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640504.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640504-supp.pdf
+shape-matters-deformable-patch-attack,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640522.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640522-supp.pdf
+frequency-domain-model-augmentation-for-adversarial-attack,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640543.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640543-supp.pdf
+prior-guided-adversarial-initialization-for-fast-adversarial-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640560.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640560-supp.pdf
+enhanced-accuracy-and-robustness-via-multi-teacher-adversarial-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640577.pdf,
+lgv-boosting-adversarial-example-transferability-from-large-geometric-vicinity,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640594.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640594-supp.pdf
+a-large-scale-multiple-objective-method-for-black-box-attack-against-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640611.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640611-supp.pdf
+gradauto-energy-oriented-attack-on-dynamic-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640628.pdf,
+a-spectral-view-of-randomized-smoothing-under-common-corruptions-benchmarking-and-improving-certified-robustness,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640645.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640645-supp.pdf
+improving-adversarial-robustness-of-3d-point-cloud-classification-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640663.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640663-supp.pdf
+learning-extremely-lightweight-and-robust-model-with-differentiable-constraints-on-sparsity-and-condition-number,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640679.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640679-supp.pdf
+ribac-towards-robust-and-imperceptible-backdoor-attack-against-compact-dnn,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640697.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640697-supp.pdf
+boosting-transferability-of-targeted-adversarial-examples-via-hierarchical-generative-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640714.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136640714-supp.pdf
+adaptive-image-transformations-for-transfer-based-adversarial-attack,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650001-supp.pdf
+generative-multiplane-images-making-a-2d-gan-3d-aware,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650019-supp.pdf
+advdo-realistic-adversarial-attacks-for-trajectory-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650036-supp.pdf
+adversarial-contrastive-learning-via-asymmetric-infonce,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650053-supp.pdf
+one-size-does-not-fit-all-data-adaptive-adversarial-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650070-supp.pdf
+unicr-universally-approximated-certified-robustness-via-randomized-smoothing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650086.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650086-supp.pdf
+hardly-perceptible-trojan-attack-against-neural-networks-with-bit-flips,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650103.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650103-supp.pdf
+robust-network-architecture-search-via-feature-distortion-restraining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650120.pdf,
+secretgen-privacy-recovery-on-pre-trained-models-via-distribution-discrimination,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650137.pdf,
+triangle-attack-a-query-efficient-decision-based-adversarial-attack,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650153.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650153-supp.pdf
+data-free-backdoor-removal-based-on-channel-lipschitzness,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650171.pdf,
+black-box-dissector-towards-erasing-based-hard-label-model-stealing-attack,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650188.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650188-supp.pdf
+learning-energy-based-models-with-adversarial-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650204.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650204-supp.pdf
+adversarial-label-poisoning-attack-on-graph-neural-networks-via-label-propagation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650223.pdf,
+revisiting-outer-optimization-in-adversarial-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650240.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650240-supp.pdf
+zero-shot-attribute-attacks-on-fine-grained-recognition-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650257.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650257-supp.pdf
+towards-effective-and-robust-neural-trojan-defenses-via-input-filtering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650277.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650277-supp.pdf
+scaling-adversarial-training-to-large-perturbation-bounds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650295.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650295-supp.pdf
+exploiting-the-local-parabolic-landscapes-of-adversarial-losses-to-accelerate-black-box-adversarial-attack,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650311.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650311-supp.pdf
+generative-domain-adaptation-for-face-anti-spoofing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650328.pdf,
+metagait-learning-to-learn-an-omni-sample-adaptive-representation-for-gait-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650350.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650350-supp.pdf
+gaitedge-beyond-plain-end-to-end-gait-recognition-for-better-practicality,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650368.pdf,
+uia-vit-unsupervised-inconsistency-aware-method-based-on-vision-transformer-for-face-forgery-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650384.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650384-supp.pdf
+effective-presentation-attack-detection-driven-by-face-related-task,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650400.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650400-supp.pdf
+ppt-token-pruned-pose-transformer-for-monocular-and-multi-view-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650416.pdf,
+avatarposer-articulated-full-body-pose-tracking-from-sparse-motion-sensing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650434.pdf,
+p-stmo-pre-trained-spatial-temporal-many-to-one-model-for-3d-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650453.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650453-supp.pdf
+d-d-learning-human-dynamics-from-dynamic-camera,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650470.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650470-supp.pdf
+explicit-occlusion-reasoning-for-multi-person-3d-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650488.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650488-supp.pdf
+couch-towards-controllable-human-chair-interactions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650508.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650508-supp.pdf
+identity-aware-hand-mesh-estimation-and-personalization-from-rgb-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650526.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650526-supp.zip
+c3p-cross-domain-pose-prior-propagation-for-weakly-supervised-3d-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650544.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650544-supp.pdf
+pose-ndf-modeling-human-pose-manifolds-with-neural-distance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650562.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650562-supp.pdf
+cliff-carrying-location-information-in-full-frames-into-human-pose-and-shape-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650580.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650580-supp.pdf
+deciwatch-a-simple-baseline-for-10x-efficient-2d-and-3d-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650597.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650597-supp.pdf
+smoothnet-a-plug-and-play-network-for-refining-human-poses-in-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650615.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650615-supp.pdf
+posetrans-a-simple-yet-effective-pose-transformation-augmentation-for-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650633.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650633-supp.pdf
+multi-person-3d-pose-and-shape-estimation-via-inverse-kinematics-and-refinement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650650.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650650-supp.pdf
+overlooked-poses-actually-make-sense-distilling-privileged-knowledge-for-human-motion-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650668.pdf,
+structural-triangulation-a-closed-form-solution-to-constrained-3d-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650685.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650685-supp.pdf
+audio-driven-stylized-gesture-generation-with-flow-based-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650701.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650701-supp.zip
+self-constrained-inference-optimization-on-structural-groups-for-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650718.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136650718-supp.pdf
+unrealego-a-new-dataset-for-robust-egocentric-3d-human-motion-capture,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660001-supp.pdf
+skeleton-parted-graph-scattering-networks-for-3d-human-motion-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660018-supp.pdf
+rethinking-keypoint-representations-modeling-keypoints-and-poses-as-objects-for-multi-person-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660036.pdf,
+virtualpose-learning-generalizable-3d-human-pose-models-from-virtual-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660054.pdf,
+poseur-direct-human-pose-regression-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660071.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660071-supp.pdf
+simcc-a-simple-coordinate-classification-perspective-for-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660088.pdf,
+regularizing-vector-embedding-in-bottom-up-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660105.pdf,
+a-visual-navigation-perspective-for-category-level-object-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660121.pdf,
+faster-voxelpose-real-time-3d-human-pose-estimation-by-orthographic-projection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660139.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660139-supp.zip
+learning-to-fit-morphable-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660156.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660156-supp.pdf
+egobody-human-body-shape-and-motion-of-interacting-people-from-head-mounted-devices,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660176-supp.pdf
+graspd-differentiable-contact-rich-grasp-synthesis-for-multi-fingered-hands,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660197.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660197-supp.zip
+autoavatar-autoregressive-neural-fields-for-dynamic-avatar-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660216.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660216-supp.zip
+deep-radial-embedding-for-visual-sequence-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660234.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660234-supp.pdf
+saga-stochastic-whole-body-grasping-with-contact,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660251.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660251-supp.pdf
+neural-capture-of-animatable-3d-human-from-monocular-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660269.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660269-supp.zip
+general-object-pose-transformation-network-from-unpaired-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660286.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660286-supp.pdf
+compositional-human-scene-interaction-synthesis-with-semantic-control,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660305.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660305-supp.pdf
+pressurevision-estimating-hand-pressure-from-a-single-rgb-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660322.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660322-supp.pdf
+posescript-3d-human-poses-from-natural-language,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660340.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660340-supp.zip
+dprost-dynamic-projective-spatial-transformer-network-for-6d-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660357.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660357-supp.pdf
+3d-interacting-hand-pose-estimation-by-hand-de-occlusion-and-removal,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660374.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660374-supp.pdf
+pose-for-everything-towards-category-agnostic-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660391.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660391-supp.pdf
+posegpt-quantization-based-3d-human-motion-generation-and-forecasting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660409.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660409-supp.zip
+dh-aug-dh-forward-kinematics-model-driven-augmentation-for-3d-human-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660427.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660427-supp.pdf
+estimating-spatially-varying-lighting-in-urban-scenes-with-disentangled-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660445.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660445-supp.pdf
+boosting-event-stream-super-resolution-with-a-recurrent-neural-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660461.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660461-supp.zip
+projective-parallel-single-pixel-imaging-to-overcome-global-illumination-in-3d-structure-light-scanning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660479.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660479-supp.pdf
+semantic-sparse-colorization-network-for-deep-exemplar-based-colorization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660495.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660495-supp.pdf
+practical-and-scalable-desktop-based-high-quality-facial-capture,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660512.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660512-supp.zip
+fast-vqa-efficient-end-to-end-video-quality-assessment-with-fragment-sampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660528.pdf,
+physically-based-editing-of-indoor-scene-lighting-from-a-single-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660545.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660545-supp.pdf
+lednet-joint-low-light-enhancement-and-deblurring-in-the-dark,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660562.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660562-supp.pdf
+mpib-an-mpi-based-bokeh-rendering-framework-for-realistic-partial-occlusion-effects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660579.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660579-supp.pdf
+real-rawvsr-real-world-raw-video-super-resolution-with-a-benchmark-dataset,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660597.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660597-supp.pdf
+transform-your-smartphone-into-a-dslr-camera-learning-the-isp-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660614.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660614-supp.pdf
+learning-deep-non-blind-image-deconvolution-without-ground-truths,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660631.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660631-supp.pdf
+nest-neural-event-stack-for-event-based-image-enhancement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660649.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660649-supp.pdf
+editable-indoor-lighting-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660666.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660666-supp.pdf
+fast-two-step-blind-optical-aberration-correction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660682.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660682-supp.pdf
+seeing-far-in-the-dark-with-patterned-flash,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660698.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660698-supp.pdf
+pseudoclick-interactive-image-segmentation-with-click-imitation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660717.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136660717-supp.pdf
+ct2-colorization-transformer-via-color-tokens,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670001-supp.pdf
+simple-baselines-for-image-restoration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670017.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670017-supp.pdf
+spike-transformer-monocular-depth-estimation-for-spiking-camera,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670034.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670034-supp.pdf
+improving-image-restoration-by-revisiting-global-information-aggregation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670053-supp.pdf
+data-association-between-event-streams-and-intensity-frames-under-diverse-baselines,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670071.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670071-supp.pdf
+d2hnet-joint-denoising-and-deblurring-with-hierarchical-network-for-robust-night-image-restoration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670089-supp.pdf
+learning-graph-neural-networks-for-image-style-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670108.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670108-supp.pdf
+deepps2-revisiting-photometric-stereo-using-two-differently-illuminated-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670125.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670125-supp.pdf
+instance-contour-adjustment-via-structure-driven-cnn,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670142-supp.pdf
+synthesizing-light-field-video-from-monocular-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670158.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670158-supp.zip
+human-centric-image-cropping-with-partition-aware-and-content-preserving-features,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670176-supp.pdf
+demfi-deep-joint-deblurring-and-multi-frame-interpolation-with-flow-guided-attentive-correlation-and-recursive-boosting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670193.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670193-supp.pdf
+neural-image-representations-for-multi-image-fusion-and-layer-separation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670210.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670210-supp.pdf
+bringing-rolling-shutter-images-alive-with-dual-reversed-distortion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670227.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670227-supp.zip
+film-frame-interpolation-for-large-motion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670244.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670244-supp.pdf
+video-interpolation-by-event-driven-anisotropic-adjustment-of-optical-flow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670261.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670261-supp.zip
+evac3d-from-event-based-apparent-contours-to-3d-models-via-continuous-visual-hulls,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670278.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670278-supp.pdf
+dccf-deep-comprehensible-color-filter-learning-framework-for-high-resolution-image-harmonization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670294.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670294-supp.pdf
+selectionconv-convolutional-neural-networks-for-non-rectilinear-image-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670310.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670310-supp.pdf
+spatial-separated-curve-rendering-network-for-efficient-and-high-resolution-image-harmonization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670327.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670327-supp.pdf
+bigcolor-colorization-using-a-generative-color-prior-for-natural-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670343.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670343-supp.pdf
+cadyq-content-aware-dynamic-quantization-for-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670360.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670360-supp.pdf
+deep-semantic-statistics-matching-d2sm-denoising-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670377.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670377-supp.zip
+3d-scene-inference-from-transient-histograms,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670394.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670394-supp.pdf
+neural-space-filling-curves,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670412.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670412-supp.pdf
+exposure-aware-dynamic-weighted-learning-for-single-shot-hdr-imaging,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670429.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670429-supp.pdf
+seeing-through-a-black-box-toward-high-quality-terahertz-imaging-via-subspace-and-attention-guided-restoration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670447.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670447-supp.pdf
+tomography-of-turbulence-strength-based-on-scintillation-imaging,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670464.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670464-supp.zip
+realistic-blur-synthesis-for-learning-image-deblurring,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670481.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670481-supp.pdf
+learning-phase-mask-for-privacy-preserving-passive-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670497.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670497-supp.pdf
+lwgnet-learned-wirtinger-gradients-for-fourier-ptychographic-phase-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670515.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670515-supp.pdf
+pandora-polarization-aided-neural-decomposition-of-radiance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670531.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670531-supp.zip
+humman-multi-modal-4d-human-dataset-for-versatile-sensing-and-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670549.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670549-supp.pdf
+dvs-voltmeter-stochastic-process-based-event-simulator-for-dynamic-vision-sensors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670571.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670571-supp.pdf
+benchmarking-omni-vision-representation-through-the-lens-of-visual-realms,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670587.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670587-supp.zip
+beat-a-large-scale-semantic-and-emotional-multi-modal-dataset-for-conversational-gestures-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670605.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670605-supp.pdf
+neuromorphic-data-augmentation-for-training-spiking-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670623.pdf,
+celebv-hq-a-large-scale-video-facial-attributes-dataset,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670641.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670641-supp.pdf
+moviecuts-a-new-dataset-and-benchmark-for-cut-type-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670659.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670659-supp.zip
+lamar-benchmarking-localization-and-mapping-for-augmented-reality,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670677.pdf,
+unitail-detecting-reading-and-matching-in-retail-scene,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670695.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670695-supp.pdf
+not-just-streaks-towards-ground-truth-for-single-image-deraining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670713.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136670713-supp.pdf
+eccv-caption-correcting-false-negatives-by-collecting-machine-and-human-verified-image-caption-associations-for-ms-coco,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680001-supp.pdf
+motcom-the-multi-object-tracking-dataset-complexity-metric,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680019-supp.pdf
+how-to-synthesize-a-large-scale-and-trainable-micro-expression-dataset,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680037-supp.pdf
+a-real-world-dataset-for-multi-view-3d-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680054.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680054-supp.zip
+realy-rethinking-the-evaluation-of-3d-face-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680072-supp.pdf
+capturing-reconstructing-and-simulating-the-urbanscene3d-dataset,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680090.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680090-supp.pdf
+3d-compat-composition-of-materials-on-parts-of-3d-things,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680107.pdf,
+partimagenet-a-large-high-quality-dataset-of-parts,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680124.pdf,
+a-okvqa-a-benchmark-for-visual-question-answering-using-world-knowledge,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680141-supp.pdf
+ood-cv-a-benchmark-for-robustness-to-out-of-distribution-shifts-of-individual-nuisances-in-natural-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680158.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680158-supp.pdf
+facial-depth-and-normal-estimation-using-single-dual-pixel-camera,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680176-supp.pdf
+the-anatomy-of-video-editing-a-dataset-and-benchmark-suite-for-ai-assisted-video-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680195.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680195-supp.pdf
+stylebabel-artistic-style-tagging-and-captioning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680212.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680212-supp.pdf
+pandora-a-panoramic-detection-dataset-for-object-with-orientation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680229-supp.pdf
+fs-coco-towards-understanding-of-freehand-sketches-of-common-objects-in-context,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680245.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680245-supp.pdf
+exploring-fine-grained-audiovisual-categorization-with-the-ssw60-dataset,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680262.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680262-supp.pdf
+the-caltech-fish-counting-dataset-a-benchmark-for-multiple-object-tracking-and-counting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680281.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680281-supp.pdf
+a-dataset-for-interactive-vision-language-navigation-with-unknown-command-feasibility,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680304.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680304-supp.pdf
+brace-the-breakdancing-competition-dataset-for-dance-motion-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680321.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680321-supp.pdf
+dress-code-high-resolution-multi-category-virtual-try-on,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680337.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680337-supp.pdf
+a-data-centric-approach-for-improving-ambiguous-labels-with-combined-semi-supervised-classification-and-clustering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680354.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680354-supp.pdf
+clearpose-large-scale-transparent-object-dataset-and-benchmark,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680372.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680372-supp.pdf
+when-deep-classifiers-agree-analyzing-correlations-between-learning-order-and-image-statistics,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680388.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680388-supp.pdf
+animeceleb-large-scale-animation-celebheads-dataset-for-head-reenactment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680405.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680405-supp.pdf
+mugen-a-playground-for-video-audio-text-multimodal-understanding-and-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680421.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680421-supp.zip
+a-dense-material-segmentation-dataset-for-indoor-and-outdoor-scene-parsing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680440.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680440-supp.pdf
+mimicme-a-large-scale-diverse-4d-database-for-facial-expression-analysis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680457.pdf,
+delving-into-universal-lesion-segmentation-method-dataset-and-benchmark,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680475.pdf,
+large-scale-real-world-multi-person-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680493.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680493-supp.pdf
+d2-tpred-discontinuous-dependency-for-trajectory-prediction-under-traffic-lights,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680512.pdf,
+the-missing-link-finding-label-relations-across-datasets,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680530.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680530-supp.pdf
+learning-omnidirectional-flow-in-360deg-video-via-siamese-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680546.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680546-supp.pdf
+vizwiz-fewshot-locating-objects-in-images-taken-by-people-with-visual-impairments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680563.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680563-supp.pdf
+trove-transforming-road-scene-datasets-into-photorealistic-virtual-environments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680579.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680579-supp.pdf
+trapped-in-texture-bias-a-large-scale-comparison-of-deep-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680597.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680597-supp.pdf
+deformable-feature-aggregation-for-dynamic-multi-modal-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680616.pdf,
+welsa-learning-to-predict-6d-pose-from-weakly-labeled-data-using-shape-alignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680633.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680633-supp.zip
+graph-r-cnn-towards-accurate-3d-object-detection-with-semantic-decorated-local-graph,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680650.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680650-supp.pdf
+mppnet-multi-frame-feature-intertwining-with-proxy-points-for-3d-temporal-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680667.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680667-supp.pdf
+long-tail-detection-with-effective-class-margins,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680684.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680684-supp.pdf
+semi-supervised-monocular-3d-object-detection-by-multi-view-consistency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680702.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680702-supp.pdf
+ptseformer-progressive-temporal-spatial-enhanced-transformer-towards-video-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136680719.pdf,
+bevformer-learning-birds-eye-view-representation-from-multi-camera-images-via-spatiotemporal-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690001-supp.pdf
+category-level-6d-object-pose-and-size-estimation-using-self-supervised-deep-prior-deformation-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690019.pdf,
+dense-teacher-dense-pseudo-labels-for-semi-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690036.pdf,
+point-to-box-network-for-accurate-object-detection-via-single-point-supervision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690053-supp.pdf
+domain-adaptive-hand-keypoint-and-pixel-localization-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690070-supp.pdf
+towards-data-efficient-detection-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690090.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690090-supp.pdf
+open-vocabulary-detr-with-conditional-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690107.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690107-supp.pdf
+prediction-guided-distillation-for-dense-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690123.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690123-supp.pdf
+multimodal-object-detection-via-probabilistic-ensembling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690139.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690139-supp.pdf
+exploiting-unlabeled-data-with-vision-and-language-models-for-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690156.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690156-supp.pdf
+cpo-change-robust-panorama-to-point-cloud-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690173.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690173-supp.pdf
+int-towards-infinite-frames-3d-detection-with-an-efficient-framework,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690190.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690190-supp.pdf
+end-to-end-weakly-supervised-object-detection-with-sparse-proposal-evolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690207.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690207-supp.pdf
+calibration-free-multi-view-crowd-counting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690224.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690224-supp.pdf
+unsupervised-domain-adaptation-for-monocular-3d-object-detection-via-self-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690242.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690242-supp.pdf
+superline3d-self-supervised-line-segmentation-and-description-for-lidar-point-cloud,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690259.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690259-supp.zip
+exploring-plain-vision-transformer-backbones-for-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690276.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690276-supp.pdf
+adversarially-aware-robust-object-detector,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690293.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690293-supp.pdf
+head-hetero-assists-distillation-for-heterogeneous-object-detectors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690310.pdf,
+you-should-look-at-all-objects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690327.pdf,
+detecting-twenty-thousand-classes-using-image-level-supervision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690344.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690344-supp.pdf
+dcl-net-deep-correspondence-learning-network-for-6d-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690362.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690362-supp.pdf
+monocular-3d-object-detection-with-depth-from-motion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690380.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690380-supp.zip
+disp6d-disentangled-implicit-shape-and-pose-learning-for-scalable-6d-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690397.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690397-supp.pdf
+distilling-object-detectors-with-global-knowledge,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690415.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690415-supp.pdf
+unifying-visual-perception-by-dispersible-points-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690432.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690432-supp.pdf
+pseco-pseudo-labeling-and-consistency-training-for-semi-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690449.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690449-supp.pdf
+exploring-resolution-and-degradation-clues-as-self-supervised-signal-for-low-quality-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690465.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690465-supp.pdf
+robust-category-level-6d-pose-estimation-with-coarse-to-fine-rendering-of-neural-features,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690484.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690484-supp.pdf
+translation-scale-and-rotation-cross-modal-alignment-meets-rgb-infrared-vehicle-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690501.pdf,
+rfla-gaussian-receptive-field-based-label-assignment-for-tiny-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690518.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690518-supp.pdf
+rethinking-iou-based-optimization-for-single-stage-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690536.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690536-supp.pdf
+td-road-top-down-road-network-extraction-with-holistic-graph-construction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690553.pdf,
+multi-faceted-distillation-of-base-novel-commonality-for-few-shot-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690569.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690569-supp.pdf
+pointclm-a-contrastive-learning-based-framework-for-multi-instance-point-cloud-registration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690586.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690586-supp.pdf
+weakly-supervised-object-localization-via-transformer-with-implicit-spatial-calibration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690603.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690603-supp.pdf
+mttrans-cross-domain-object-detection-with-mean-teacher-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690620.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690620-supp.pdf
+multi-domain-multi-definition-landmark-localization-for-small-datasets,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690637.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690637-supp.pdf
+deviant-depth-equivariant-network-for-monocular-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690655.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690655-supp.pdf
+label-guided-auxiliary-training-improves-3d-object-detector,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690674.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690674-supp.pdf
+promptdet-towards-open-vocabulary-detection-using-uncurated-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690691.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690691-supp.pdf
+densely-constrained-depth-estimator-for-monocular-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690708.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690708-supp.pdf
+polarimetric-pose-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690726.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136690726-supp.pdf
+dfnet-enhance-absolute-pose-regression-with-direct-feature-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700001-supp.pdf
+cornerformer-purifying-instances-for-corner-based-detectors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700017.pdf,
+pillarnet-real-time-and-high-performance-pillar-based-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700034.pdf,
+robust-object-detection-with-inaccurate-bounding-boxes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700052.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700052-supp.pdf
+efficient-decoder-free-object-detection-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700069.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700069-supp.pdf
+cross-modality-knowledge-distillation-network-for-monocular-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700085.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700085-supp.pdf
+react-temporal-action-detection-with-relational-queries,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700102.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700102-supp.pdf
+towards-accurate-active-camera-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700119.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700119-supp.pdf
+camera-pose-auto-encoders-for-improving-pose-regression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700137.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700137-supp.pdf
+improving-the-intra-class-long-tail-in-3d-detection-via-rare-example-mining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700155.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700155-supp.pdf
+bagging-regional-classification-activation-maps-for-weakly-supervised-object-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700174.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700174-supp.zip
+uc-owod-unknown-classified-open-world-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700191.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700191-supp.pdf
+raytran-3d-pose-estimation-and-shape-reconstruction-of-multiple-objects-from-videos-with-ray-traced-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700209.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700209-supp.pdf
+gtcar-graph-transformer-for-camera-re-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700227.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700227-supp.pdf
+3d-object-detection-with-a-self-supervised-lidar-scene-flow-backbone,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700244.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700244-supp.pdf
+open-vocabulary-object-detection-with-pseudo-bounding-box-labels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700263.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700263-supp.pdf
+few-shot-object-detection-by-knowledge-distillation-using-bag-of-visual-words-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700279.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700279-supp.pdf
+salisa-saliency-based-input-sampling-for-efficient-video-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700296.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700296-supp.pdf
+eco-tr-efficient-correspondences-finding-via-coarse-to-fine-refinement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700313.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700313-supp.pdf
+vote-from-the-center-6-dof-pose-estimation-in-rgb-d-images-by-radial-keypoint-voting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700331.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700331-supp.pdf
+long-tailed-instance-segmentation-using-gumbel-optimized-loss,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700349.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700349-supp.pdf
+detmatch-two-teachers-are-better-than-one-for-joint-2d-and-3d-semi-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700366.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700366-supp.pdf
+objectbox-from-centers-to-boxes-for-anchor-free-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700385.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700385-supp.pdf
+is-geometry-enough-for-matching-in-visual-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700402.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700402-supp.pdf
+swformer-sparse-window-transformer-for-3d-object-detection-in-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700422.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700422-supp.pdf
+pcr-cg-point-cloud-registration-via-deep-explicit-color-and-geometry,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700439.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700439-supp.pdf
+glamd-global-and-local-attention-mask-distillation-for-object-detectors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700456.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700456-supp.zip
+fcaf3d-fully-convolutional-anchor-free-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700473.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700473-supp.pdf
+video-anomaly-detection-by-solving-decoupled-spatio-temporal-jigsaw-puzzles,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700490.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700490-supp.pdf
+class-agnostic-object-detection-with-multi-modal-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700507.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700507-supp.pdf
+enhancing-multi-modal-features-using-local-self-attention-for-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700527.pdf,
+object-detection-as-probabilistic-set-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700545.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700545-supp.pdf
+weakly-supervised-temporal-action-detection-for-fine-grained-videos-with-hierarchical-atomic-actions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700562.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700562-supp.pdf
+neural-correspondence-field-for-object-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700580.pdf,
+on-label-granularity-and-object-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700598.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700598-supp.pdf
+oimnet-prototypical-normalization-and-localization-aware-learning-for-person-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700615.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700615-supp.pdf
+out-of-distribution-identification-let-detector-tell-which-i-am-not-sure,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700631.pdf,
+learning-with-free-object-segments-for-long-tailed-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700648.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700648-supp.pdf
+autoregressive-uncertainty-modeling-for-3d-bounding-box-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700665.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700665-supp.pdf
+3d-random-occlusion-and-multi-layer-projection-for-deep-multi-camera-pedestrian-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700681.pdf,
+a-simple-single-scale-vision-transformer-for-object-detection-and-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700697.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700697-supp.pdf
+simple-open-vocabulary-object-detection-with-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700714.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136700714-supp.pdf
+a-simple-approach-and-benchmark-for-21000-category-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710001.pdf,
+knowledge-condensation-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710019-supp.pdf
+reducing-information-loss-for-spiking-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710036.pdf,
+masked-generative-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710053.pdf,
+fine-grained-data-distribution-alignment-for-post-training-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710070-supp.pdf
+learning-with-recoverable-forgetting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710087.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710087-supp.zip
+efficient-one-pass-self-distillation-with-zipfs-label-smoothing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710104.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710104-supp.pdf
+prune-your-model-before-distill-it,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710120.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710120-supp.pdf
+deep-partial-updating-towards-communication-efficient-updating-for-on-device-inference,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710137.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710137-supp.pdf
+patch-similarity-aware-data-free-quantization-for-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710154.pdf,
+l3-accelerator-friendly-lossless-image-format-for-high-resolution-high-throughput-dnn-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710171.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710171-supp.pdf
+streaming-multiscale-deep-equilibrium-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710189.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710189-supp.pdf
+symmetry-regularization-and-saturating-nonlinearity-for-robust-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710207.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710207-supp.pdf
+sp-net-slowly-progressing-dynamic-inference-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710225.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710225-supp.pdf
+equivariance-and-invariance-inductive-bias-for-learning-from-insufficient-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710242.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710242-supp.pdf
+mixed-precision-neural-network-quantization-via-learned-layer-wise-importance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710260.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710260-supp.pdf
+event-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710276.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710276-supp.zip
+edgevits-competing-light-weight-cnns-on-mobile-devices-with-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710294.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710294-supp.pdf
+palquant-accelerating-high-precision-networks-on-low-precision-accelerators,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710312.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710312-supp.pdf
+disentangled-differentiable-network-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710329.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710329-supp.pdf
+ida-det-an-information-discrepancy-aware-distillation-for-1-bit-detectors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710347.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710347-supp.pdf
+learning-to-weight-samples-for-dynamic-early-exiting-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710363.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710363-supp.pdf
+adabin-improving-binary-neural-networks-with-adaptive-binary-sets,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710380.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710380-supp.pdf
+adaptive-token-sampling-for-efficient-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710397.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710397-supp.pdf
+weight-fixing-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710416.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710416-supp.pdf
+self-slimmed-vision-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710433.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710433-supp.pdf
+switchable-online-knowledge-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710450.pdf,
+l-robustness-and-beyond-unleashing-efficient-adversarial-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710466.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710466-supp.pdf
+multi-granularity-pruning-for-model-acceleration-on-mobile-devices,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710483.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710483-supp.pdf
+deep-ensemble-learning-by-diverse-knowledge-distillation-for-fine-grained-object-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710501.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710501-supp.pdf
+helpful-or-harmful-inter-task-association-in-continual-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710518.pdf,
+towards-accurate-binary-neural-networks-via-modeling-contextual-dependencies,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710535.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710535-supp.pdf
+spin-an-empirical-evaluation-on-sharing-parameters-of-isotropic-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710552.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710552-supp.pdf
+ensemble-knowledge-guided-sub-network-search-and-fine-tuning-for-filter-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710568.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710568-supp.pdf
+network-binarization-via-contrastive-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710585.pdf,
+lipschitz-continuity-retained-binary-neural-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710601.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710601-supp.pdf
+spvit-enabling-faster-vision-transformers-via-latency-aware-soft-token-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710618.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710618-supp.pdf
+soft-masking-for-cost-constrained-channel-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710640.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710640-supp.pdf
+non-uniform-step-size-quantization-for-accurate-post-training-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710657.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710657-supp.pdf
+supertickets-drawing-task-agnostic-lottery-tickets-from-supernets-via-jointly-architecture-searching-and-parameter-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710673.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710673-supp.pdf
+meta-gf-training-dynamic-depth-neural-networks-harmoniously,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710691.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710691-supp.pdf
+towards-ultra-low-latency-spiking-neural-networks-for-vision-and-sequential-tasks-using-temporal-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710709.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710709-supp.zip
+towards-accurate-network-quantization-with-equivalent-smooth-regularizer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710726.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136710726-supp.pdf
+explicit-model-size-control-and-relaxation-via-smooth-regularization-for-mixed-precision-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720001-supp.pdf
+basq-branch-wise-activation-clipping-search-quantization-for-sub-4-bit-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720017.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720017-supp.pdf
+you-already-have-it-a-generator-free-low-precision-dnn-training-framework-using-stochastic-rounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720034.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720034-supp.pdf
+real-spike-learning-real-valued-spikes-for-spiking-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720052.pdf,
+fedltn-federated-learning-for-sparse-and-personalized-lottery-ticket-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720069.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720069-supp.pdf
+theoretical-understanding-of-the-information-flow-on-continual-learning-performance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720085.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720085-supp.pdf
+exploring-lottery-ticket-hypothesis-in-spiking-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720101.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720101-supp.pdf
+on-the-angular-update-and-hyperparameter-tuning-of-a-scale-invariant-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720120.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720120-supp.pdf
+lana-latency-aware-network-acceleration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720136.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720136-supp.pdf
+rdo-q-extremely-fine-grained-channel-wise-quantization-via-rate-distortion-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720156.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720156-supp.pdf
+u-boost-nas-utilization-boosted-differentiable-neural-architecture-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720172.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720172-supp.pdf
+ptq4vit-post-training-quantization-for-vision-transformers-with-twin-uniform-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720190.pdf,
+bitwidth-adaptive-quantization-aware-neural-network-training-a-meta-learning-approach,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720207.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720207-supp.pdf
+understanding-the-dynamics-of-dnns-using-graph-modularity,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720224.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720224-supp.pdf
+latent-discriminant-deterministic-uncertainty,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720242.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720242-supp.pdf
+making-heads-or-tails-towards-semantically-consistent-visual-counterfactuals,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720260.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720260-supp.pdf
+hive-evaluating-the-human-interpretability-of-visual-explanations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720277.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720277-supp.pdf
+bayescap-bayesian-identity-cap-for-calibrated-uncertainty-in-frozen-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720295.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720295-supp.pdf
+sess-saliency-enhancing-with-scaling-and-sliding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720313.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720313-supp.pdf
+no-token-left-behind-explainability-aided-image-classification-and-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720329.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720329-supp.pdf
+interpretable-image-classification-with-differentiable-prototypes-assignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720346.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720346-supp.zip
+contributions-of-shape-texture-and-color-in-visual-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720364.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720364-supp.pdf
+steex-steering-counterfactual-explanations-with-semantics,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720382.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720382-supp.pdf
+are-vision-transformers-robust-to-patch-perturbations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720399.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720399-supp.pdf
+a-dataset-generation-framework-for-evaluating-megapixel-image-classifiers-their-explanations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720416.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720416-supp.pdf
+cartoon-explanations-of-image-classifiers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720439.pdf,
+shap-cam-visual-explanations-for-convolutional-neural-networks-based-on-shapley-value,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720455.pdf,
+privacy-preserving-face-recognition-with-learnable-privacy-budgets-in-frequency-domain,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720471.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720471-supp.pdf
+contrast-phys-unsupervised-video-based-remote-physiological-measurement-via-spatiotemporal-contrast,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720488.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720488-supp.pdf
+source-free-domain-adaptation-with-contrastive-domain-alignment-and-self-supervised-exploration-for-face-anti-spoofing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720506.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720506-supp.pdf
+on-mitigating-hard-clusters-for-face-clustering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720523.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720523-supp.pdf
+oneface-one-threshold-for-all,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720539.pdf,
+label2label-a-language-modeling-framework-for-multi-attribute-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720556.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720556-supp.pdf
+agetransgan-for-facial-age-transformation-with-rectified-performance-metrics,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720573.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720573-supp.pdf
+hierarchical-contrastive-inconsistency-learning-for-deepfake-video-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720588.pdf,
+rethinking-robust-representation-learning-under-fine-grained-noisy-faces,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720605.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720605-supp.pdf
+teaching-where-to-look-attention-similarity-knowledge-distillation-for-low-resolution-face-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720622.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720622-supp.pdf
+teaching-with-soft-label-smoothing-for-mitigating-noisy-labels-in-facial-expressions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720639.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720639-supp.pdf
+learning-dynamic-facial-radiance-fields-for-few-shot-talking-head-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720657.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720657-supp.zip
+coupleface-relation-matters-for-face-recognition-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720674.pdf,
+controllable-and-guided-face-synthesis-for-unconstrained-face-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720692.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720692-supp.pdf
+towards-robust-face-recognition-with-comprehensive-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720711.pdf,
+towards-unbiased-label-distribution-learning-for-facial-pose-estimation-using-anisotropic-spherical-gaussian,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136720728.pdf,
+au-aware-3d-face-reconstruction-through-personalized-au-specific-blendshape-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730001-supp.pdf
+bezierpalm-a-free-lunch-for-palmprint-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730019-supp.pdf
+adaptive-transformers-for-robust-few-shot-cross-domain-face-anti-spoofing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730037-supp.pdf
+face2facer-real-time-high-resolution-one-shot-face-reenactment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730055.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730055-supp.zip
+towards-racially-unbiased-skin-tone-estimation-via-scene-disambiguation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730072-supp.pdf
+boundaryface-a-mining-framework-with-noise-label-self-correction-for-face-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730092.pdf,
+pre-training-strategies-and-datasets-for-facial-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730109.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730109-supp.pdf
+look-both-ways-self-supervising-driver-gaze-estimation-and-road-scene-saliency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730128.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730128-supp.pdf
+mfim-megapixel-facial-identity-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730145.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730145-supp.pdf
+3d-face-reconstruction-with-dense-landmarks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730162.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730162-supp.pdf
+emotion-aware-multi-view-contrastive-learning-for-facial-emotion-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730181.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730181-supp.zip
+order-learning-using-partially-ordered-data-via-chainization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730199.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730199-supp.pdf
+unsupervised-high-fidelity-facial-texture-generation-and-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730215.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730215-supp.pdf
+multi-domain-learning-for-updating-face-anti-spoofing-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730232.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730232-supp.zip
+towards-metrical-reconstruction-of-human-faces,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730249.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730249-supp.zip
+discover-and-mitigate-unknown-biases-with-debiasing-alternate-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730270.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730270-supp.pdf
+unsupervised-and-semi-supervised-bias-benchmarking-in-face-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730288.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730288-supp.pdf
+towards-efficient-adversarial-training-on-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730307.pdf,
+mime-minority-inclusion-for-majority-group-enhancement-of-ai-performance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730327.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730327-supp.pdf
+studying-bias-in-gans-through-the-lens-of-race,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730345.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730345-supp.pdf
+trust-but-verify-using-self-supervised-probing-to-improve-trustworthiness,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730362.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730362-supp.pdf
+learning-to-censor-by-noisy-sampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730378.pdf,
+an-invisible-black-box-backdoor-attack-through-frequency-domain,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730396.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730396-supp.pdf
+fairgrape-fairness-aware-gradient-pruning-method-for-face-attribute-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730414.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730414-supp.pdf
+attaining-class-level-forgetting-in-pretrained-model-using-few-samples,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730433.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730433-supp.zip
+anti-neuron-watermarking-protecting-personal-data-against-unauthorized-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730449.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730449-supp.zip
+an-impartial-take-to-the-cnn-vs-transformer-robustness-contest,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730466.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730466-supp.pdf
+recover-fair-deep-classification-models-via-altering-pre-trained-structure,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730482.pdf,
+decouple-and-sample-protecting-sensitive-information-in-task-agnostic-data-release,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730499.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730499-supp.pdf
+privacy-preserving-action-recognition-via-motion-difference-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730518.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730518-supp.pdf
+latent-space-smoothing-for-individually-fair-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730535.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730535-supp.pdf
+parameterized-temperature-scaling-for-boosting-the-expressive-power-in-post-hoc-uncertainty-calibration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730554.pdf,
+fairstyle-debiasing-stylegan2-with-style-channel-manipulations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730569.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730569-supp.pdf
+distilling-the-undistillable-learning-from-a-nasty-teacher,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730586.pdf,
+sos-self-supervised-learning-over-sets-of-handled-objects-in-egocentric-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730603.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730603-supp.pdf
+egocentric-activity-recognition-and-localization-on-a-3d-map,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730620.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730620-supp.pdf
+generative-adversarial-network-for-future-hand-segmentation-from-egocentric-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730638.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730638-supp.zip
+my-view-is-the-best-view-procedure-learning-from-egocentric-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730656.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730656-supp.pdf
+gimo-gaze-informed-human-motion-prediction-in-context,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730675.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730675-supp.pdf
+image-based-clip-guided-essence-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730693.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730693-supp.pdf
+detecting-and-recovering-sequential-deepfake-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730710.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730710-supp.pdf
+self-supervised-sparse-representation-for-video-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730727.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136730727-supp.pdf
+watermark-vaccine-adversarial-attacks-to-prevent-watermark-removal,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740001-supp.pdf
+explaining-deepfake-detection-by-analysing-image-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740018-supp.pdf
+frequencylowcut-pooling-plug-play-against-catastrophic-overfitting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740036-supp.pdf
+tafim-targeted-adversarial-attacks-against-facial-image-manipulations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740053-supp.pdf
+fingerprintnet-synthesized-fingerprints-for-generated-image-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740071.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740071-supp.pdf
+detecting-generated-images-by-real-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740089.pdf,
+an-information-theoretic-approach-for-attention-driven-face-forgery-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740105.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740105-supp.pdf
+exploring-disentangled-content-information-for-face-forgery-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740122.pdf,
+repmix-representation-mixing-for-robust-attribution-of-synthesized-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740140.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740140-supp.pdf
+totems-physical-objects-for-verifying-visual-integrity,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740158.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740158-supp.pdf
+dual-stream-knowledge-preserving-hashing-for-unsupervised-video-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740175.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740175-supp.pdf
+pass-part-aware-self-supervised-pre-training-for-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740192.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740192-supp.zip
+adaptive-cross-domain-learning-for-generalizable-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740209.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740209-supp.pdf
+multi-query-video-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740227.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740227-supp.zip
+hierarchical-average-precision-training-for-pertinent-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740244.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740244-supp.pdf
+learning-semantic-correspondence-with-sparse-annotations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740261.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740261-supp.pdf
+dynamically-transformed-instance-normalization-network-for-generalizable-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740279.pdf,
+domain-adaptive-person-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740295.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740295-supp.pdf
+ts2-net-token-shift-and-selection-transformer-for-text-video-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740311.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740311-supp.pdf
+unstructured-feature-decoupling-for-vehicle-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740328.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740328-supp.pdf
+deep-hash-distillation-for-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740345.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740345-supp.pdf
+mimic-embedding-via-adaptive-aggregation-learning-generalizable-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740362.pdf,
+granularity-aware-adaptation-for-image-retrieval-over-multiple-tasks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740379.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740379-supp.pdf
+learning-audio-video-modalities-from-image-captions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740396.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740396-supp.pdf
+rvsl-robust-vehicle-similarity-learning-in-real-hazy-scenes-based-on-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740415.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740415-supp.pdf
+lightweight-attentional-feature-fusion-a-new-baseline-for-text-to-video-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740432.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740432-supp.pdf
+modality-synergy-complement-learning-with-cascaded-aggregation-for-visible-infrared-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740450.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740450-supp.pdf
+cross-modality-transformer-for-visible-infrared-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740467.pdf,
+audio-visual-mismatch-aware-video-retrieval-via-association-and-adjustment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740484.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740484-supp.pdf
+connecting-compression-spaces-with-transformer-for-approximate-nearest-neighbor-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740502.pdf,
+semicon-a-learning-to-hash-solution-for-large-scale-fine-grained-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740518.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740518-supp.pdf
+cavit-contextual-alignment-vision-transformer-for-video-object-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740535.pdf,
+text-based-temporal-localization-of-novel-events,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740552.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740552-supp.pdf
+reliability-aware-prediction-via-uncertainty-learning-for-person-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740572.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740572-supp.pdf
+relighting4d-neural-relightable-human-from-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740589.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740589-supp.pdf
+real-time-intermediate-flow-estimation-for-video-frame-interpolation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740608.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740608-supp.pdf
+pixelfolder-an-efficient-progressive-pixel-synthesis-network-for-image-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740626.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740626-supp.pdf
+styleswap-style-based-generator-empowers-robust-face-swapping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740644.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740644-supp.zip
+paint2pix-interactive-painting-based-progressive-image-synthesis-and-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740662.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740662-supp.pdf
+furrygan-high-quality-foreground-aware-image-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740679.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740679-supp.pdf
+scam-transferring-humans-between-images-with-semantic-cross-attention-modulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740696.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740696-supp.pdf
+sem2nerf-converting-single-view-semantic-masks-to-neural-radiance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740713.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740713-supp.pdf
+wavegan-frequency-aware-gan-for-high-fidelity-few-shot-image-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750001-supp.pdf
+end-to-end-visual-editing-with-a-generatively-pre-trained-artist,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750018-supp.pdf
+high-fidelity-gan-inversion-with-padding-space,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750036-supp.pdf
+designing-one-unified-framework-for-high-fidelity-face-reenactment-and-swapping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750053-supp.pdf
+sobolev-training-for-implicit-neural-representations-with-approximated-image-derivatives,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750070-supp.pdf
+make-a-scene-scene-based-text-to-image-generation-with-human-priors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750087.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750087-supp.pdf
+3d-fm-gan-towards-3d-controllable-face-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750106.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750106-supp.pdf
+multi-curve-translator-for-high-resolution-photorealistic-image-translation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750124.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750124-supp.pdf
+deep-bayesian-video-frame-interpolation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750141-supp.pdf
+cross-attention-based-style-distribution-for-controllable-person-image-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750158.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750158-supp.zip
+keypointnerf-generalizing-image-based-volumetric-avatars-using-relative-spatial-encoding-of-keypoints,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750176-supp.pdf
+viewformer-nerf-free-neural-rendering-from-few-images-using-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750195.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750195-supp.pdf
+l-tracing-fast-light-visibility-estimation-on-neural-surfaces-by-sphere-tracing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750214.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750214-supp.pdf
+a-perceptual-quality-metric-for-video-frame-interpolation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750231.pdf,
+adaptive-feature-interpolation-for-low-shot-image-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750251.pdf,
+palgan-image-colorization-with-palette-generative-adversarial-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750268.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750268-supp.pdf
+fast-vid2vid-spatial-temporal-compression-for-video-to-video-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750285.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750285-supp.pdf
+learning-prior-feature-and-attention-enhanced-image-inpainting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750303.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750303-supp.pdf
+temporal-mpi-enabling-multi-plane-images-for-dynamic-scene-modelling-via-temporal-basis-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750321.pdf,
+3d-aware-semantic-guided-generative-model-for-human-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750337.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750337-supp.pdf
+temporally-consistent-semantic-video-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750355.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750355-supp.pdf
+error-compensation-framework-for-flow-guided-video-inpainting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750373.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750373-supp.pdf
+scraping-textures-from-natural-images-for-synthesis-and-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750389.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750389-supp.pdf
+single-stage-virtual-try-on-via-deformable-attention-flows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750406.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750406-supp.pdf
+improving-gans-for-long-tailed-data-through-group-spectral-regularization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750423-supp.pdf
+hierarchical-semantic-regularization-of-latent-spaces-in-stylegans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750440.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750440-supp.pdf
+interestyle-encoding-an-interest-region-for-robust-stylegan-inversion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750457.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750457-supp.pdf
+stylelight-hdr-panorama-generation-for-lighting-estimation-and-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750474.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750474-supp.pdf
+contrastive-monotonic-pixel-level-modulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750491.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750491-supp.pdf
+learning-cross-video-neural-representations-for-high-quality-frame-interpolation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750509.pdf,
+learning-continuous-implicit-representation-for-near-periodic-patterns,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750527.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750527-supp.pdf
+end-to-end-graph-constrained-vectorized-floorplan-generation-with-panoptic-refinement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750545.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750545-supp.pdf
+few-shot-image-generation-with-mixup-based-distance-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750561.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750561-supp.pdf
+a-style-based-gan-encoder-for-high-fidelity-reconstruction-of-images-and-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750579.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750579-supp.pdf
+fakeclr-exploring-contrastive-learning-for-solving-latent-discontinuity-in-data-efficient-gans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750596.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750596-supp.pdf
+blobgan-spatially-disentangled-scene-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750613.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750613-supp.pdf
+unified-implicit-neural-stylization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750633.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750633-supp.pdf
+gan-with-multivariate-disentangling-for-controllable-hair-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750653.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750653-supp.pdf
+discovering-transferable-forensic-features-for-cnn-generated-images-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750669.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750669-supp.pdf
+harmonizer-learning-to-perform-white-box-image-and-video-harmonization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750688.pdf,
+text2live-text-driven-layered-image-and-video-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750705.pdf,
+digging-into-radiance-grid-for-real-time-view-synthesis-with-detail-preservation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136750722.pdf,
+stylegan-human-a-data-centric-odyssey-of-human-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760001-supp.pdf
+colorformer-image-colorization-via-color-memory-assisted-hybrid-attention-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760020.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760020-supp.pdf
+eagan-efficient-two-stage-evolutionary-architecture-search-for-gans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760036.pdf,
+weakly-supervised-stitching-network-for-real-world-panoramic-image-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760052.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760052-supp.pdf
+dynast-dynamic-sparse-transformer-for-exemplar-guided-image-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760070-supp.pdf
+multimodal-conditional-image-synthesis-with-product-of-experts-gans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760089-supp.pdf
+auto-regressive-image-synthesis-with-integrated-quantization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760106.pdf,
+jojogan-one-shot-face-stylization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760124.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760124-supp.pdf
+vecgan-image-to-image-translation-with-interpretable-latent-directions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760141-supp.pdf
+any-resolution-training-for-high-resolution-image-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760158.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760158-supp.pdf
+ccpl-contrastive-coherence-preserving-loss-for-versatile-style-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760176-supp.pdf
+canf-vc-conditional-augmented-normalizing-flows-for-video-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760193.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760193-supp.pdf
+bi-level-feature-alignment-for-versatile-image-translation-and-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760210.pdf,
+high-fidelity-image-inpainting-with-gan-inversion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760228.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760228-supp.pdf
+deltagan-towards-diverse-few-shot-image-generation-with-sample-specific-delta,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760245.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760245-supp.pdf
+image-inpainting-with-cascaded-modulation-gan-and-object-aware-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760263.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760263-supp.pdf
+styleface-towards-identity-disentangled-face-generation-on-megapixels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760281.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760281-supp.pdf
+video-extrapolation-in-space-and-time,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760297.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760297-supp.pdf
+contrastive-learning-for-diverse-disentangled-foreground-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760313.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760313-supp.pdf
+bips-bi-modal-indoor-panorama-synthesis-via-residual-depth-aided-adversarial-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760331.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760331-supp.pdf
+augmentation-of-rppg-benchmark-datasets-learning-to-remove-and-embed-rppg-signals-via-double-cycle-consistent-learning-from-unpaired-facial-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760351.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760351-supp.zip
+geometry-aware-single-image-full-body-human-relighting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760367.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760367-supp.pdf
+3d-aware-indoor-scene-synthesis-with-depth-priors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760385.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760385-supp.pdf
+deep-portrait-delighting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760402.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760402-supp.zip
+vector-quantized-image-to-image-translation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760419.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760419-supp.pdf
+the-surprisingly-straightforward-scene-text-removal-method-with-gated-attention-and-region-of-interest-generation-a-comprehensive-prominent-model-analysis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760436.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760436-supp.pdf
+free-viewpoint-rgb-d-human-performance-capture-and-rendering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760452.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760452-supp.pdf
+multiview-regenerative-morphing-with-dual-flows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760469.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760469-supp.pdf
+hallucinating-pose-compatible-scenes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760487.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760487-supp.pdf
+motion-and-appearance-adaptation-for-cross-domain-motion-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760506.pdf,
+layered-controllable-video-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760523.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760523-supp.pdf
+custom-structure-preservation-in-face-aging,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760541.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760541-supp.pdf
+spatio-temporal-deformable-attention-network-for-video-deblurring,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760558.pdf,
+neumesh-learning-disentangled-neural-mesh-based-implicit-field-for-geometry-and-texture-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760574.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760574-supp.zip
+nerf-for-outdoor-scene-relighting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760593.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760593-supp.zip
+cogs-controllable-generation-and-search-from-sketch-and-style,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760610.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760610-supp.pdf
+hairnet-hairstyle-transfer-with-pose-changes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760628.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760628-supp.pdf
+unbiased-multi-modality-guidance-for-image-inpainting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760645.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760645-supp.pdf
+intelli-paint-towards-developing-more-human-intelligible-painting-agents,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760662.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760662-supp.pdf
+motion-transformer-for-unsupervised-image-animation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760679.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760679-supp.pdf
+nuwa-visual-synthesis-pre-training-for-neural-visual-world-creation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760697.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760697-supp.pdf
+elegant-exquisite-and-locally-editable-gan-for-makeup-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760714.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136760714-supp.pdf
+editing-out-of-domain-gan-inversion-via-differential-activations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770001-supp.zip
+on-the-robustness-of-quality-measures-for-gans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770018-supp.pdf
+sound-guided-semantic-video-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770034.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770034-supp.pdf
+inpainting-at-modern-camera-resolution-by-guided-patchmatch-with-auto-curation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770051.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770051-supp.pdf
+controllable-video-generation-through-global-and-local-motion-dynamics,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770069.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770069-supp.pdf
+styleheat-one-shot-high-resolution-editable-talking-face-generation-via-pre-trained-stylegan,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770086.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770086-supp.pdf
+long-video-generation-with-time-agnostic-vqgan-and-time-sensitive-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770103.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770103-supp.pdf
+combining-internal-and-external-constraints-for-unrolling-shutter-in-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770120.pdf,
+wise-whitebox-image-stylization-by-example-based-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770136.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770136-supp.pdf
+neural-radiance-transfer-fields-for-relightable-novel-view-synthesis-with-global-illumination,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770155.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770155-supp.zip
+transformers-as-meta-learners-for-implicit-neural-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770173.pdf,
+style-your-hair-latent-optimization-for-pose-invariant-hairstyle-transfer-via-local-style-aware-hair-alignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770191.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770191-supp.pdf
+high-resolution-virtual-try-on-with-misalignment-and-occlusion-handled-conditions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770208.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770208-supp.pdf
+a-codec-information-assisted-framework-for-efficient-compressed-video-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770224.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770224-supp.pdf
+injecting-3d-perception-of-controllable-nerf-gan-into-stylegan-for-editable-portrait-image-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770240.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770240-supp.pdf
+adanerf-adaptive-sampling-for-real-time-rendering-of-neural-radiance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770258.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770258-supp.pdf
+improving-the-perceptual-quality-of-2d-animation-interpolation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770275.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770275-supp.zip
+selective-transhdr-transformer-based-selective-hdr-imaging-using-ghost-region-mask,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770292.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770292-supp.pdf
+learning-series-parallel-lookup-tables-for-efficient-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770309.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770309-supp.pdf
+geoaug-data-augmentation-for-few-shot-nerf-with-geometry-constraints,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770326.pdf,
+doodleformer-creative-sketch-drawing-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770343.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770343-supp.pdf
+implicit-neural-representations-for-variable-length-human-motion-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770359.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770359-supp.pdf
+learning-object-placement-via-dual-path-graph-completion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770376.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770376-supp.pdf
+expanded-adaptive-scaling-normalization-for-end-to-end-image-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770392.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770392-supp.pdf
+generator-knows-what-discriminator-should-learn-in-unconditional-gans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770408.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770408-supp.pdf
+compositional-visual-generation-with-composable-diffusion-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770426.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770426-supp.pdf
+manifest-manifold-deformation-for-few-shot-image-translation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770443.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770443-supp.zip
+supervised-attribute-information-removal-and-reconstruction-for-image-manipulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770460.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770460-supp.pdf
+blt-bidirectional-layout-transformer-for-controllable-layout-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770477.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770477-supp.pdf
+diverse-generation-from-a-single-video-made-possible,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770494.pdf,
+rayleigh-eigendirections-reds-nonlinear-gan-latent-space-traversals-for-multidimensional-features,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770513.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770513-supp.pdf
+bridging-the-domain-gap-towards-generalization-in-automatic-colorization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770530.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770530-supp.pdf
+generating-natural-images-with-direct-patch-distributions-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770547.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770547-supp.pdf
+context-consistent-semantic-image-editing-with-style-preserved-modulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770564.pdf,
+eliminating-gradient-conflict-in-reference-based-line-art-colorization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770582.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770582-supp.pdf
+unsupervised-learning-of-efficient-geometry-aware-neural-articulated-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770600.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770600-supp.pdf
+jpeg-artifacts-removal-via-contrastive-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770618.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770618-supp.pdf
+unpaired-deep-image-dehazing-using-contrastive-disentanglement-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770636.pdf,
+efficient-long-range-attention-network-for-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770653.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770653-supp.pdf
+flowformer-a-transformer-architecture-for-optical-flow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770672.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770672-supp.zip
+coarse-to-fine-sparse-transformer-for-hyperspectral-image-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770690.pdf,
+learning-shadow-correspondence-for-video-shadow-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770709.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770709-supp.pdf
+metric-learning-based-interactive-modulation-for-real-world-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770727.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136770727-supp.pdf
+dynamic-dual-trainable-bounds-for-ultra-low-precision-super-resolution-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780001-supp.pdf
+osformer-one-stage-camouflaged-instance-segmentation-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780019.pdf,
+highly-accurate-dichotomous-image-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780036-supp.pdf
+boosting-supervised-dehazing-methods-via-bi-level-patch-reweighting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780055.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780055-supp.pdf
+flow-guided-transformer-for-video-inpainting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780072-supp.pdf
+shift-tolerant-perceptual-similarity-metric,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780089.pdf,
+perception-distortion-balanced-admm-optimization-for-single-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780106.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780106-supp.pdf
+vqfr-blind-face-restoration-with-vector-quantized-dictionary-and-parallel-decoder,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780124.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780124-supp.pdf
+uncertainty-learning-in-kernel-estimation-for-multi-stage-blind-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780141-supp.pdf
+learning-spatio-temporal-downsampling-for-effective-video-upscaling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780159.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780159-supp.pdf
+learning-local-implicit-fourier-representation-for-image-warping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780179.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780179-supp.pdf
+seplut-separable-image-adaptive-lookup-tables-for-real-time-image-enhancement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780197.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780197-supp.pdf
+blind-image-decomposition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780214.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780214-supp.pdf
+mulut-cooperating-multiple-look-up-tables-for-efficient-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780234.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780234-supp.pdf
+learning-spatiotemporal-frequency-transformer-for-compressed-video-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780252.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780252-supp.pdf
+spatial-frequency-domain-information-integration-for-pan-sharpening,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780268.pdf,
+adaptive-patch-exiting-for-scalable-single-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780286.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780286-supp.pdf
+efficient-meta-tuning-for-content-aware-neural-video-delivery,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780302.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780302-supp.pdf
+reference-based-image-super-resolution-with-deformable-attention-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780318.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780318-supp.pdf
+local-color-distributions-prior-for-image-enhancement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780336.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780336-supp.pdf
+l-coder-language-based-colorization-with-color-object-decoupling-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780352.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780352-supp.pdf
+from-face-to-natural-image-learning-real-degradation-for-blind-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780368.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780368-supp.pdf
+towards-interpretable-video-super-resolution-via-alternating-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780385.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780385-supp.pdf
+event-based-fusion-for-motion-deblurring-with-cross-modal-attention,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780403.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780403-supp.pdf
+fast-and-high-quality-image-denoising-via-malleable-convolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780420.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780420-supp.pdf
+tape-task-agnostic-prior-embedding-for-image-restoration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780438.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780438-supp.pdf
+uncertainty-inspired-underwater-image-enhancement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780456.pdf,
+hourglass-attention-network-for-image-inpainting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780474.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780474-supp.pdf
+unfolded-deep-kernel-estimation-for-blind-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780493.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780493-supp.pdf
+event-guided-deblurring-of-unknown-exposure-time-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780510.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780510-supp.zip
+reconet-recurrent-correction-network-for-fast-and-efficient-multi-modality-image-fusion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780528.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780528-supp.pdf
+content-adaptive-latents-and-decoder-for-neural-image-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780545.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780545-supp.pdf
+efficient-and-degradation-adaptive-network-for-real-world-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780563.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780563-supp.pdf
+unidirectional-video-denoising-by-mimicking-backward-recurrent-modules-with-look-ahead-forward-ones,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780581.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780581-supp.pdf
+self-supervised-learning-for-real-world-super-resolution-from-dual-zoomed-observations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780599.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780599-supp.pdf
+secrets-of-event-based-optical-flow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780616.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780616-supp.pdf
+towards-efficient-and-scale-robust-ultra-high-definition-image-demoireing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780634.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780634-supp.pdf
+erdn-equivalent-receptive-field-deformable-network-for-video-deblurring,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780651.pdf,
+rethinking-generic-camera-models-for-deep-single-image-camera-calibration-to-recover-rotation-and-fisheye-distortion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780668.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780668-supp.zip
+art-ss-an-adaptive-rejection-technique-for-semi-supervised-restoration-for-adverse-weather-affected-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780688.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780688-supp.zip
+fusion-from-decomposition-a-self-supervised-decomposition-approach-for-image-fusion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780706.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780706-supp.pdf
+learning-degradation-representations-for-image-deblurring,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780724.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136780724-supp.pdf
+learning-mutual-modulation-for-self-supervised-cross-modal-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790001-supp.pdf
+spectrum-aware-and-transferable-architecture-search-for-hyperspectral-image-restoration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790019-supp.pdf
+neural-color-operators-for-sequential-image-retouching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790037-supp.pdf
+optimizing-image-compression-via-joint-learning-with-denoising,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790054.pdf,
+restore-globally-refine-locally-a-mask-guided-scheme-to-accelerate-super-resolution-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790072-supp.zip
+compiler-aware-neural-architecture-search-for-on-mobile-real-time-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790089-supp.pdf
+modeling-mask-uncertainty-in-hyperspectral-image-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790109.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790109-supp.pdf
+perceiving-and-modeling-density-for-image-dehazing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790126.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790126-supp.pdf
+stripformer-strip-transformer-for-fast-image-deblurring,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790142-supp.pdf
+deep-fourier-based-exposure-correction-network-with-spatial-frequency-interaction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790159.pdf,
+frequency-and-spatial-dual-guidance-for-image-dehazing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790177.pdf,
+towards-real-world-hdrtv-reconstruction-a-data-synthesis-based-approach,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790195.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790195-supp.pdf
+learning-discriminative-shrinkage-deep-networks-for-image-deconvolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790212.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790212-supp.pdf
+kxnet-a-model-driven-deep-neural-network-for-blind-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790230.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790230-supp.pdf
+arm-any-time-super-resolution-method,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790248.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790248-supp.pdf
+attention-aware-learning-for-hyperparameter-prediction-in-image-processing-pipelines,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790265.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790265-supp.pdf
+realflow-em-based-realistic-optical-flow-dataset-generation-from-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790282.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790282-supp.pdf
+memory-augmented-model-driven-network-for-pansharpening,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790299.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790299-supp.pdf
+all-you-need-is-raw-defending-against-adversarial-attacks-with-camera-image-pipelines,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790316.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790316-supp.pdf
+ghost-free-high-dynamic-range-imaging-with-context-aware-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790336.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790336-supp.pdf
+style-guided-shadow-removal,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790353.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790353-supp.pdf
+d2c-sr-a-divergence-to-convergence-approach-for-real-world-image-super-resolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790370.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790370-supp.pdf
+grit-vlp-grouped-mini-batch-sampling-for-efficient-vision-and-language-pre-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790386.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790386-supp.pdf
+efficient-video-deblurring-guided-by-motion-magnitude,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790403.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790403-supp.zip
+single-frame-atmospheric-turbulence-mitigation-a-benchmark-study-and-a-new-physics-inspired-transformer-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790419.pdf,
+contextformer-a-transformer-with-spatio-channel-attention-for-context-modeling-in-learned-image-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790436.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790436-supp.pdf
+image-super-resolution-with-deep-dictionary,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790454.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790454-supp.pdf
+tempformer-temporally-consistent-transformer-for-video-denoising,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790471.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790471-supp.zip
+rawtobit-a-fully-end-to-end-camera-isp-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790487.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790487-supp.pdf
+drcnet-dynamic-image-restoration-contrastive-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790504.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790504-supp.pdf
+zero-shot-learning-for-reflection-removal-of-single-360-degree-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790523.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790523-supp.pdf
+transformer-with-implicit-edges-for-particle-based-physics-simulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790539.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790539-supp.pdf
+rethinking-video-rain-streak-removal-a-new-synthesis-model-and-a-deraining-network-with-video-rain-prior,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790556.pdf,
+super-resolution-by-predicting-offsets-an-ultra-efficient-super-resolution-network-for-rasterized-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790572.pdf,
+animation-from-blur-multi-modal-blur-decomposition-with-motion-guidance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790588.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790588-supp.zip
+alphavc-high-performance-and-efficient-learned-video-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790605.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790605-supp.pdf
+content-oriented-learned-image-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790621.pdf,
+rrsr-reciprocal-reference-based-image-super-resolution-with-progressive-feature-alignment-and-selection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790637.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790637-supp.pdf
+contrastive-prototypical-network-with-wasserstein-confidence-penalty,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790654.pdf,
+learn-to-decompose-cascaded-decomposition-network-for-cross-domain-few-shot-facial-expression-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790672.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790672-supp.pdf
+self-support-few-shot-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790689.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790689-supp.pdf
+few-shot-object-detection-with-model-calibration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790707.pdf,
+self-supervision-can-be-a-good-few-shot-learner,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790726.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136790726-supp.pdf
+tsf-transformer-based-semantic-filter-for-few-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800001.pdf,
+adversarial-feature-augmentation-for-cross-domain-few-shot-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800019-supp.pdf
+constructing-balance-from-imbalance-for-long-tailed-image-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800036-supp.pdf
+on-multi-domain-long-tailed-recognition-imbalanced-domain-generalization-and-beyond,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800054.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800054-supp.pdf
+few-shot-video-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800071.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800071-supp.pdf
+worst-case-matters-for-few-shot-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800092.pdf,
+exploring-hierarchical-graph-representation-for-large-scale-zero-shot-image-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800108.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800108-supp.zip
+doubly-deformable-aggregation-of-covariance-matrices-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800125.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800125-supp.pdf
+dense-cross-query-and-support-attention-weighted-mask-aggregation-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800142-supp.pdf
+rethinking-clustering-based-pseudo-labeling-for-unsupervised-meta-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800160.pdf,
+claster-clustering-with-reinforcement-learning-for-zero-shot-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800177-supp.pdf
+few-shot-class-incremental-learning-for-3d-point-cloud-objects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800194.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800194-supp.pdf
+meta-learning-with-less-forgetting-on-large-scale-non-stationary-task-distributions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800211.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800211-supp.pdf
+dna-improving-few-shot-transfer-learning-with-low-rank-decomposition-and-alignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800229-supp.pdf
+learning-instance-and-task-aware-dynamic-kernels-for-few-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800247.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800247-supp.pdf
+open-world-semantic-segmentation-via-contrasting-and-clustering-vision-language-embedding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800265.pdf,
+few-shot-classification-with-contrastive-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800283.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800283-supp.pdf
+time-reversed-diffusion-tensor-transformer-a-new-tenet-of-few-shot-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800300.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800300-supp.pdf
+self-promoted-supervision-for-few-shot-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800318.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800318-supp.pdf
+few-shot-object-counting-and-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800336.pdf,
+rethinking-few-shot-object-detection-on-a-multi-domain-benchmark,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800354.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800354-supp.pdf
+cross-domain-cross-set-few-shot-learning-via-learning-compact-and-aligned-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800371.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800371-supp.pdf
+mutually-reinforcing-structure-with-proposal-contrastive-consistency-for-few-shot-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800388.pdf,
+dual-contrastive-learning-with-anatomical-auxiliary-supervision-for-few-shot-medical-image-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800406.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800406-supp.pdf
+improving-few-shot-learning-through-multi-task-representation-learning-theory,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800423-supp.pdf
+tree-structure-aware-few-shot-image-classification-via-hierarchical-aggregation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800440.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800440-supp.pdf
+inductive-and-transductive-few-shot-video-classification-via-appearance-and-temporal-alignments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800457.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800457-supp.pdf
+temporal-and-cross-modal-attention-for-audio-visual-zero-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800474.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800474-supp.pdf
+hm-hybrid-masking-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800492.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800492-supp.pdf
+transvlad-focusing-on-locally-aggregated-descriptors-for-few-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800509.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800509-supp.pdf
+kernel-relative-prototype-spectral-filtering-for-few-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800527.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800527-supp.pdf
+this-is-my-unicorn-fluffy-personalizing-frozen-vision-language-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800544.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800544-supp.pdf
+close-curriculum-learning-on-the-sharing-extent-towards-better-one-shot-nas,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800563.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800563-supp.pdf
+streamable-neural-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800580.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800580-supp.zip
+gradient-based-uncertainty-for-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800598.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800598-supp.pdf
+online-continual-learning-with-contrastive-vision-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800614.pdf,
+cprune-compiler-informed-model-pruning-for-efficient-target-aware-dnn-execution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800634.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800634-supp.pdf
+eautodet-efficient-architecture-search-for-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800652.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800652-supp.pdf
+a-max-flow-based-approach-for-neural-architecture-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800668.pdf,
+occamnets-mitigating-dataset-bias-by-favoring-simpler-hypotheses,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800685.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800685-supp.zip
+era-enhanced-rational-activations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800705.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800705-supp.pdf
+convolutional-embedding-makes-hierarchical-vision-transformer-stronger,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800722.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136800722-supp.pdf
+active-label-correction-using-robust-parameter-update-and-entropy-propagation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810001-supp.pdf
+unpaired-image-translation-via-vector-symbolic-architectures,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810017.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810017-supp.pdf
+uninet-unified-architecture-search-with-convolution-transformer-and-mlp,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810034.pdf,
+amixer-adaptive-weight-mixing-for-self-attention-free-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810051.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810051-supp.pdf
+tinyvit-fast-pretraining-distillation-for-small-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810068.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810068-supp.pdf
+equivariant-hypergraph-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810086.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810086-supp.pdf
+scalenet-searching-for-the-model-to-scale,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810103.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810103-supp.pdf
+complementing-brightness-constancy-with-deep-networks-for-optical-flow-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810120.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810120-supp.pdf
+vitas-vision-transformer-architecture-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810138.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810138-supp.pdf
+lidarnas-unifying-and-searching-neural-architectures-for-3d-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810156.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810156-supp.pdf
+uncertainty-dtw-for-time-series-and-sequences,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810174.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810174-supp.pdf
+black-box-few-shot-knowledge-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810191.pdf,
+revisiting-batch-norm-initialization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810207.pdf,
+ssbnet-improving-visual-recognition-efficiency-by-adaptive-sampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810224.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810224-supp.pdf
+filter-pruning-via-feature-discrimination-in-deep-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810241.pdf,
+la3-efficient-label-aware-autoaugment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810258.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810258-supp.pdf
+interpretations-steered-network-pruning-via-amortized-inferred-saliency-maps,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810274.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810274-supp.pdf
+ba-net-bridge-attention-for-deep-convolutional-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810293.pdf,
+sau-smooth-activation-function-using-convolution-with-approximate-identities,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810309.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810309-supp.zip
+multi-exit-semantic-segmentation-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810326.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810326-supp.pdf
+almost-orthogonal-layers-for-efficient-general-purpose-lipschitz-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810345.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810345-supp.pdf
+pointscatter-point-set-representation-for-tubular-structure-extraction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810361.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810361-supp.pdf
+check-and-link-pairwise-lesion-correspondence-guides-mammogram-mass-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810379.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810379-supp.pdf
+graph-constrained-contrastive-regularization-for-semi-weakly-volumetric-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810396.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810396-supp.pdf
+generalizable-medical-image-segmentation-via-random-amplitude-mixup-and-domain-specific-image-restoration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810415.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810415-supp.zip
+auto-fedrl-federated-hyperparameter-optimization-for-multi-institutional-medical-image-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810431.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810431-supp.pdf
+personalizing-federated-medical-image-segmentation-via-local-calibration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810449.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810449-supp.pdf
+one-shot-medical-landmark-localization-by-edge-guided-transform-and-noisy-landmark-refinement,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810466.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810466-supp.pdf
+ultra-high-resolution-unpaired-stain-transformation-via-kernelized-instance-normalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810483.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810483-supp.pdf
+med-danet-dynamic-architecture-network-for-efficient-medical-volumetric-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810499.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810499-supp.pdf
+concl-concept-contrastive-learning-for-dense-prediction-pre-training-in-pathology-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810516.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810516-supp.pdf
+cryoai-amortized-inference-of-poses-for-ab-initio-reconstruction-of-3d-molecular-volumes-from-real-cryo-em-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810533.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810533-supp.pdf
+unimiss-universal-medical-self-supervised-learning-via-breaking-dimensionality-barrier,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810551.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810551-supp.pdf
+dlme-deep-local-flatness-manifold-embedding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810569.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810569-supp.pdf
+semi-supervised-keypoint-detector-and-descriptor-for-retinal-image-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810586.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810586-supp.pdf
+graph-neural-network-for-cell-tracking-in-microscopy-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810602.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810602-supp.zip
+cxr-segmentation-by-adain-based-domain-adaptation-and-knowledge-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810619.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810619-supp.pdf
+accurate-detection-of-proteins-in-cryo-electron-tomograms-from-sparse-labels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810636.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810636-supp.pdf
+k-salsa-k-anonymous-synthetic-averaging-of-retinal-images-via-local-style-alignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810652.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810652-supp.pdf
+radiotransformer-a-cascaded-global-focal-transformer-for-visual-attention-guided-disease-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810669.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810669-supp.pdf
+differentiable-zooming-for-multiple-instance-learning-on-whole-slide-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810689.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810689-supp.pdf
+learning-uncoupled-modulation-cvae-for-3d-action-conditioned-human-motion-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810707.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810707-supp.zip
+towards-grand-unification-of-object-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810724.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136810724-supp.pdf
+bytetrack-multi-object-tracking-by-associating-every-detection-box,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820001-supp.pdf
+robust-multi-object-tracking-by-marginal-inference,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820020.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820020-supp.pdf
+polarmot-how-far-can-geometric-relations-take-us-in-3d-multi-object-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820038.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820038-supp.pdf
+particle-video-revisited-tracking-through-occlusions-using-point-trajectories,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820055.pdf,
+tracking-objects-as-pixel-wise-distributions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820072-supp.pdf
+cmt-context-matching-guided-transformer-for-3d-tracking-in-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820091.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820091-supp.pdf
+towards-generic-3d-tracking-in-rgbd-videos-benchmark-and-baseline,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820108.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820108-supp.pdf
+hierarchical-latent-structure-for-multi-modal-vehicle-trajectory-forecasting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820125.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820125-supp.pdf
+aiatrack-attention-in-attention-for-transformer-visual-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820141-supp.pdf
+disentangling-architecture-and-training-for-optical-flow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820159.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820159-supp.pdf
+a-perturbation-constrained-adversarial-attack-for-evaluating-the-robustness-of-optical-flow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820177-supp.pdf
+robust-landmark-based-stent-tracking-in-x-ray-fluoroscopy,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820195.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820195-supp.pdf
+social-ode-multi-agent-trajectory-forecasting-with-neural-ordinary-differential-equations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820211.pdf,
+social-ssl-self-supervised-cross-sequence-representation-learning-based-on-transformers-for-multi-agent-trajectory-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820227.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820227-supp.pdf
+diverse-human-motion-prediction-guided-by-multi-level-spatial-temporal-anchors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820244.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820244-supp.pdf
+learning-pedestrian-group-representations-for-multi-modal-trajectory-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820263.pdf,
+sequential-multi-view-fusion-network-for-fast-lidar-point-motion-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820282.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820282-supp.pdf
+e-graph-minimal-solution-for-rigid-rotation-with-extensibility-graphs,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820298.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820298-supp.zip
+point-cloud-compression-with-range-image-based-entropy-model-for-autonomous-driving,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820315.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820315-supp.pdf
+joint-feature-learning-and-relation-modeling-for-tracking-a-one-stream-framework,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820332.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820332-supp.pdf
+motionclip-exposing-human-motion-generation-to-clip-space,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820349.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820349-supp.pdf
+backbone-is-all-your-need-a-simplified-architecture-for-visual-object-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820366.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820366-supp.pdf
+aware-of-the-history-trajectory-forecasting-with-the-local-behavior-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820383.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820383-supp.pdf
+optical-flow-training-under-limited-label-budget-via-active-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820400.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820400-supp.pdf
+hierarchical-feature-embedding-for-visual-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820418.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820418-supp.zip
+tackling-background-distraction-in-video-object-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820434.pdf,
+social-implicit-rethinking-trajectory-prediction-evaluation-and-the-effectiveness-of-implicit-maximum-likelihood-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820451.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820451-supp.pdf
+temos-generating-diverse-human-motions-from-textual-descriptions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820468.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820468-supp.pdf
+tracking-every-thing-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820486.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820486-supp.pdf
+hulc-3d-human-motion-capture-with-pose-manifold-sampling-and-dense-contact-guidance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820503.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820503-supp.zip
+towards-sequence-level-training-for-visual-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820521.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820521-supp.pdf
+learned-monocular-depth-priors-in-visual-inertial-initialization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820537.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820537-supp.pdf
+robust-visual-tracking-by-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820555.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820555-supp.zip
+meshloc-mesh-based-visual-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820573.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820573-supp.pdf
+s2f2-single-stage-flow-forecasting-for-future-multiple-trajectories-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820593.pdf,
+large-displacement-3d-object-tracking-with-hybrid-non-local-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820609.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820609-supp.pdf
+fear-fast-efficient-accurate-and-robust-visual-tracker,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820625.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820625-supp.pdf
+pref-predictability-regularized-neural-motion-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820643.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820643-supp.zip
+view-vertically-a-hierarchical-network-for-trajectory-prediction-via-fourier-spectrums,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820661.pdf,
+hvc-net-unifying-homography-visibility-and-confidence-learning-for-planar-object-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820679.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820679-supp.zip
+ramgan-region-attentive-morphing-gan-for-region-level-makeup-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820696.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820696-supp.pdf
+sinnerf-training-neural-radiance-fields-on-complex-scenes-from-a-single-image,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820712.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820712-supp.pdf
+entropy-driven-sampling-and-training-scheme-for-conditional-diffusion-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820730.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136820730-supp.pdf
+accelerating-score-based-generative-models-with-preconditioned-diffusion-sampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830001-supp.pdf
+learning-to-generate-realistic-lidar-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830017.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830017-supp.zip
+rfnet-4d-joint-object-reconstruction-and-flow-estimation-from-4d-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830036.pdf,
+diverse-image-inpainting-with-normalizing-flow,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830053-supp.pdf
+improved-masked-image-generation-with-token-critic,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830070-supp.pdf
+trend-truncated-generalized-normal-density-estimation-of-inception-embeddings-for-gan-evaluation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830087.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830087-supp.pdf
+exploring-gradient-based-multi-directional-controls-in-gans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830103.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830103-supp.pdf
+spatially-invariant-unsupervised-3d-object-centric-learning-and-scene-decomposition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830120.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830120-supp.pdf
+neural-scene-decoration-from-a-single-photograph,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830137.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830137-supp.pdf
+outpainting-by-queries,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830154.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830154-supp.pdf
+unleashing-transformers-parallel-token-prediction-with-discrete-absorbing-diffusion-for-fast-high-resolution-image-generation-from-vector-quantized-codes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830171.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830171-supp.zip
+chunkygan-real-image-inversion-via-segments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830191.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830191-supp.zip
+gan-cocktail-mixing-gans-without-dataset-access,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830207.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830207-supp.pdf
+geometry-guided-progressive-nerf-for-generalizable-and-efficient-neural-human-rendering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830224.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830224-supp.zip
+controllable-shadow-generation-using-pixel-height-maps,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830240.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830240-supp.pdf
+learning-where-to-look-generative-nas-is-surprisingly-efficient,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830257.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830257-supp.pdf
+subspace-diffusion-generative-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830274.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830274-supp.pdf
+duelgan-a-duel-between-two-discriminators-stabilizes-the-gan-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830290.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830290-supp.zip
+miner-multiscale-implicit-neural-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830308.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830308-supp.pdf
+an-embedded-feature-whitening-approach-to-deep-neural-network-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830324.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830324-supp.pdf
+q-fw-a-hybrid-classical-quantum-frank-wolfe-for-quadratic-binary-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830341.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830341-supp.pdf
+self-supervised-learning-of-visual-graph-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830359.pdf,
+scalable-learning-to-optimize-a-learned-optimizer-can-train-big-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830376.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830376-supp.pdf
+qista-imagenet-a-deep-compressive-image-sensing-framework-solving-lq-norm-optimization-problem,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830394.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830394-supp.pdf
+r-dfcil-relation-guided-representation-learning-for-data-free-class-incremental-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830411.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830411-supp.pdf
+domain-generalization-by-mutual-information-regularization-with-pre-trained-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830427.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830427-supp.pdf
+predicting-is-not-understanding-recognizing-and-addressing-underspecification-in-machine-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830445.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830445-supp.pdf
+neural-sim-learning-to-generate-training-data-with-nerf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830463.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830463-supp.pdf
+bayesian-optimization-with-clustering-and-rollback-for-cnn-auto-pruning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830480.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830480-supp.pdf
+learned-variational-video-color-propagation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830497.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830497-supp.pdf
+continual-variational-autoencoder-learning-via-online-cooperative-memorization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830515.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830515-supp.pdf
+learning-to-learn-with-smooth-regularization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830533.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830533-supp.pdf
+incremental-task-learning-with-incremental-rank-updates,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830549.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830549-supp.pdf
+batch-efficient-eigendecomposition-for-small-and-medium-matrices,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830566.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830566-supp.pdf
+ensemble-learning-priors-driven-deep-unfolding-for-scalable-video-snapshot-compressive-imaging,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830583.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830583-supp.zip
+approximate-discrete-optimal-transport-plan-with-auxiliary-measure-method,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830602.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830602-supp.pdf
+a-comparative-study-of-graph-matching-algorithms-in-computer-vision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830618.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830618-supp.pdf
+improving-generalization-in-federated-learning-by-seeking-flat-minima,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830636.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830636-supp.pdf
+semidefinite-relaxations-of-truncated-least-squares-in-robust-rotation-search-tight-or-not,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830655.pdf,
+transfer-without-forgetting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830672.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830672-supp.pdf
+adabest-minimizing-client-drift-in-federated-learning-via-adaptive-bias-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830690.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830690-supp.pdf
+tackling-long-tailed-category-distribution-under-domain-shifts,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830706.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830706-supp.pdf
+doubly-fused-vit-fuse-information-from-vision-transformer-doubly-with-local-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830723.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136830723-supp.pdf
+improving-vision-transformers-by-revisiting-high-frequency-components,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840001-supp.pdf
+recurrent-bilinear-optimization-for-binary-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840019.pdf,
+neural-architecture-search-for-spiking-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840036-supp.pdf
+where-to-focus-investigating-hierarchical-attention-relationship-for-fine-grained-visual-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840056.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840056-supp.pdf
+davit-dual-attention-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840073.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840073-supp.pdf
+optimal-transport-for-label-efficient-visible-infrared-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840091.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840091-supp.pdf
+locality-guidance-for-improving-vision-transformers-on-tiny-datasets,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840108.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840108-supp.pdf
+neighborhood-collective-estimation-for-noisy-label-identification-and-correction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840126.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840126-supp.pdf
+few-shot-class-incremental-learning-via-entropy-regularized-data-free-replay,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840144.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840144-supp.pdf
+anti-retroactive-interference-for-lifelong-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840160.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840160-supp.pdf
+towards-calibrated-hyper-sphere-representation-via-distribution-overlap-coefficient-for-long-tailed-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840176.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840176-supp.pdf
+dynamic-metric-learning-with-cross-level-concept-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840194.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840194-supp.pdf
+menet-a-memory-based-network-with-dual-branch-for-efficient-event-stream-processing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840211.pdf,
+out-of-distribution-detection-with-boundary-aware-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840232.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840232-supp.pdf
+learning-hierarchy-aware-features-for-reducing-mistake-severity,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840249.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840249-supp.pdf
+learning-to-detect-every-thing-in-an-open-world,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840265.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840265-supp.pdf
+kvt-k-nn-attention-for-boosting-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840281.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840281-supp.pdf
+registration-based-few-shot-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840300.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840300-supp.pdf
+improving-robustness-by-enhancing-weak-subnets,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840317.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840317-supp.pdf
+learning-invariant-visual-representations-for-compositional-zero-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840335.pdf,
+improving-covariance-conditioning-of-the-svd-meta-layer-by-orthogonality,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840352.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840352-supp.pdf
+out-of-distribution-detection-with-semantic-mismatch-under-masking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840369.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840369-supp.pdf
+data-free-neural-architecture-search-via-recursive-label-calibration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840386.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840386-supp.pdf
+learning-from-multiple-annotator-noisy-labels-via-sample-wise-label-fusion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840402.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840402-supp.pdf
+acknowledging-the-unknown-for-multi-label-learning-with-single-positive-labels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840418.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840418-supp.pdf
+automix-unveiling-the-power-of-mixup-for-stronger-classifiers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840435.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840435-supp.pdf
+maxvit-multi-axis-vision-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840453.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840453-supp.pdf
+scalablevit-rethinking-the-context-oriented-generalization-of-vision-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840473.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840473-supp.pdf
+three-things-everyone-should-know-about-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840490.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840490-supp.pdf
+deit-iii-revenge-of-the-vit,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840509.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840509-supp.pdf
+mixskd-self-knowledge-distillation-from-mixup-for-image-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840527.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840527-supp.pdf
+self-feature-distillation-with-uncertainty-modeling-for-degraded-image-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840544.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840544-supp.pdf
+novel-class-discovery-without-forgetting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840561.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840561-supp.pdf
+safa-sample-adaptive-feature-augmentation-for-long-tailed-image-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840578.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840578-supp.pdf
+negative-samples-are-at-large-leveraging-hard-distance-elastic-loss-for-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840595.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840595-supp.pdf
+discrete-constrained-regression-for-local-counting-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840612.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840612-supp.pdf
+breadcrumbs-adversarial-class-balanced-sampling-for-long-tailed-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840628.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840628-supp.pdf
+chairs-can-be-stood-on-overcoming-object-bias-in-human-object-interaction-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840645.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840645-supp.pdf
+a-fast-knowledge-distillation-framework-for-visual-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840663.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840663-supp.pdf
+dice-leveraging-sparsification-for-out-of-distribution-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840680.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840680-supp.pdf
+invariant-feature-learning-for-generalized-long-tailed-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840698.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840698-supp.pdf
+sliced-recursive-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840716.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136840716-supp.pdf
+cross-domain-ensemble-distillation-for-domain-generalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850001-supp.pdf
+centrality-and-consistency-two-stage-clean-samples-identification-for-learning-with-instance-dependent-noisy-labels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850021.pdf,
+hyperspherical-learning-in-multi-label-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850038.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850038-supp.pdf
+when-active-learning-meets-implicit-semantic-data-augmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850056.pdf,
+vl-ltr-learning-class-wise-visual-linguistic-representation-for-long-tailed-visual-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850072-supp.pdf
+class-is-invariant-to-context-and-vice-versa-on-learning-invariance-for-out-of-distribution-generalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850089-supp.pdf
+hierarchical-semi-supervised-contrastive-learning-for-contamination-resistant-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850107.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850107-supp.pdf
+tracking-by-associating-clips,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850126.pdf,
+realpatch-a-statistical-matching-framework-for-model-patching-with-real-samples,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850144.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850144-supp.pdf
+background-insensitive-scene-text-recognition-with-text-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850161.pdf,
+semantic-novelty-detection-via-relational-reasoning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850181.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850181-supp.pdf
+improving-closed-and-open-vocabulary-attribute-prediction-using-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850199.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850199-supp.pdf
+training-vision-transformers-with-only-2040-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850218.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850218-supp.pdf
+bridging-images-and-videos-a-simple-learning-framework-for-large-vocabulary-video-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850235.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850235-supp.pdf
+tdam-top-down-attention-module-for-contextually-guided-feature-selection-in-cnns,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850255.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850255-supp.pdf
+automatic-check-out-via-prototype-based-classifier-learning-from-single-product-exemplars,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850273.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850273-supp.pdf
+overcoming-shortcut-learning-in-a-target-domain-by-generalizing-basic-visual-factors-from-a-source-domain,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850290.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850290-supp.pdf
+photo-realistic-neural-domain-randomization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850306.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850306-supp.zip
+wave-vit-unifying-wavelet-and-transformers-for-visual-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850324.pdf,
+tailoring-self-supervision-for-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850342.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850342-supp.pdf
+difficulty-aware-simulator-for-open-set-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850360.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850360-supp.pdf
+few-shot-class-incremental-learning-from-an-open-set-perspective,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850377.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850377-supp.pdf
+foster-feature-boosting-and-compression-for-class-incremental-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850393.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850393-supp.pdf
+visual-knowledge-tracing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850410.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850410-supp.pdf
+s3c-self-supervised-stochastic-classifiers-for-few-shot-class-incremental-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850427.pdf,
+improving-fine-grained-visual-recognition-in-low-data-regimes-via-self-boosting-attention-mechanism,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850444.pdf,
+vsa-learning-varied-size-window-attention-in-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850460.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850460-supp.pdf
+unbiased-manifold-augmentation-for-coarse-class-subdivision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850478.pdf,
+densehybrid-hybrid-anomaly-detection-for-dense-open-set-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850494.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850494-supp.pdf
+rethinking-confidence-calibration-for-failure-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850512.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850512-supp.pdf
+uncertainty-guided-source-free-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850530.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850530-supp.pdf
+should-all-proposals-be-treated-equally-in-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850549.pdf,
+vip-unified-certified-detection-and-recovery-for-patch-attack-with-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850566.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850566-supp.pdf
+incdfm-incremental-deep-feature-modeling-for-continual-novelty-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850581.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850581-supp.pdf
+igformer-interaction-graph-transformer-for-skeleton-based-human-interaction-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850598.pdf,
+prime-a-few-primitives-can-boost-robustness-to-common-corruptions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850615.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850615-supp.pdf
+rotation-regularization-without-rotation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850632.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850632-supp.pdf
+towards-accurate-open-set-recognition-via-background-class-regularization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850648.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850648-supp.pdf
+in-defense-of-image-pre-training-for-spatiotemporal-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850665.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850665-supp.pdf
+augmenting-deep-classifiers-with-polynomial-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850682.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850682-supp.pdf
+learning-with-noisy-labels-by-efficient-transition-matrix-estimation-to-combat-label-miscorrection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850700.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850700-supp.pdf
+online-task-free-continual-learning-with-dynamic-sparse-distributed-memory,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136850721.pdf,
+contrastive-deep-supervision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860001.pdf,
+discriminability-transferability-trade-off-an-information-theoretic-perspective,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860020.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860020-supp.pdf
+locvtp-video-text-pre-training-for-temporal-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860037-supp.pdf
+few-shot-end-to-end-object-detection-via-constantly-concentrated-encoding-across-heads,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860056.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860056-supp.pdf
+implicit-neural-representations-for-image-compression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860073.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860073-supp.pdf
+lip-flow-learning-inference-time-priors-for-codec-avatars-via-normalizing-flows-in-latent-space,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860091.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860091-supp.pdf
+learning-to-drive-by-watching-youtube-videos-action-conditioned-contrastive-policy-pretraining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860109.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860109-supp.pdf
+learning-ego-3d-representation-as-ray-tracing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860126.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860126-supp.pdf
+static-and-dynamic-concepts-for-self-supervised-video-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860142-supp.pdf
+spherefed-hyperspherical-federated-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860161.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860161-supp.pdf
+hierarchically-self-supervised-transformer-for-human-skeleton-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860181.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860181-supp.pdf
+posterior-refinement-on-metric-matrix-improves-generalization-bound-in-metric-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860199.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860199-supp.pdf
+balancing-stability-and-plasticity-through-advanced-null-space-in-continual-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860215.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860215-supp.pdf
+disco-remedying-self-supervised-learning-on-lightweight-models-with-distilled-contrastive-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860233.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860233-supp.pdf
+coscl-cooperation-of-small-continual-learners-is-stronger-than-a-big-one,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860249.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860249-supp.pdf
+manifold-adversarial-learning-for-cross-domain-3d-shape-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860266.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860266-supp.pdf
+fast-moco-boost-momentum-based-contrastive-learning-with-combinatorial-patches,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860283.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860283-supp.pdf
+lord-local-4d-implicit-representation-for-high-fidelity-dynamic-human-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860299.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860299-supp.pdf
+on-the-versatile-uses-of-partial-distance-correlation-in-deep-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860318.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860318-supp.pdf
+self-regulated-feature-learning-via-teacher-free-feature-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860337.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860337-supp.pdf
+balancing-between-forgetting-and-acquisition-in-incremental-subpopulation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860354.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860354-supp.pdf
+counterfactual-intervention-feature-transfer-for-visible-infrared-person-re-identification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860371.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860371-supp.pdf
+das-densely-anchored-sampling-for-deep-metric-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860388.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860388-supp.pdf
+learn-from-all-erasing-attention-consistency-for-noisy-label-facial-expression-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860406.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860406-supp.pdf
+a-non-isotropic-probabilistic-take-on-proxy-based-deep-metric-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860423-supp.pdf
+tokenmix-rethinking-image-mixing-for-data-augmentation-in-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860442.pdf,
+ufo-unified-feature-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860459.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860459-supp.pdf
+sound-localization-by-self-supervised-time-delay-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860476.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860476-supp.pdf
+x-learner-learning-cross-sources-and-tasks-for-universal-visual-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860495.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860495-supp.pdf
+slip-self-supervision-meets-language-image-pre-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860514.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860514-supp.pdf
+discovering-deformable-keypoint-pyramids,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860531.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860531-supp.pdf
+neural-video-compression-using-gans-for-detail-synthesis-and-propagation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860549.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860549-supp.pdf
+a-contrastive-objective-for-learning-disentangled-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860566.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860566-supp.pdf
+pt4al-using-self-supervised-pretext-tasks-for-active-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860583.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860583-supp.pdf
+parc-net-position-aware-circular-convolution-with-merits-from-convnets-and-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860600.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860600-supp.pdf
+dualprompt-complementary-prompting-for-rehearsal-free-continual-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860617.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860617-supp.pdf
+unifying-visual-contrastive-learning-for-object-recognition-from-a-graph-perspective,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860635.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860635-supp.pdf
+decoupled-contrastive-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860653.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860653-supp.pdf
+joint-learning-of-localized-representations-from-medical-images-and-reports,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860670.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860670-supp.pdf
+the-challenges-of-continuous-self-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860687.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860687-supp.pdf
+conditional-stroke-recovery-for-fine-grained-sketch-based-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860708.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860708-supp.pdf
+identifying-hard-noise-in-long-tailed-sample-distribution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860725.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136860725-supp.pdf
+relative-contrastive-loss-for-unsupervised-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870001-supp.pdf
+fine-grained-fashion-representation-learning-by-online-deep-clustering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870019-supp.pdf
+nashae-disentangling-representations-through-adversarial-covariance-minimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870036-supp.pdf
+a-gyrovector-space-approach-for-symmetric-positive-semi-definite-matrix-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870052.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870052-supp.pdf
+learning-visual-representation-from-modality-shared-contrastive-language-image-pre-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870069.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870069-supp.pdf
+contrasting-quadratic-assignments-for-set-based-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870087.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870087-supp.pdf
+class-incremental-learning-with-cross-space-clustering-and-controlled-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870104.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870104-supp.pdf
+object-discovery-and-representation-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870121.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870121-supp.pdf
+trading-positional-complexity-vs-deepness-in-coordinate-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870142-supp.pdf
+mvdg-a-unified-multi-view-framework-for-domain-generalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870158.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870158-supp.pdf
+panoptic-scene-graph-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870175.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870175-supp.pdf
+object-compositional-neural-implicit-surfaces,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870194.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870194-supp.pdf
+rignet-repetitive-image-guided-network-for-depth-completion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870211.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870211-supp.pdf
+fade-fusing-the-assets-of-decoder-and-encoder-for-task-agnostic-upsampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870228.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870228-supp.pdf
+lidal-inter-frame-uncertainty-based-active-learning-for-3d-lidar-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870245.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870245-supp.pdf
+hierarchical-memory-learning-for-fine-grained-scene-graph-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870263.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870263-supp.pdf
+doda-data-oriented-sim-to-real-domain-adaptation-for-3d-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870280.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870280-supp.pdf
+mtformer-multi-task-learning-via-transformer-and-cross-task-reasoning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870299.pdf,
+monoplflownet-permutohedral-lattice-flownet-for-real-scale-3d-scene-flow-estimation-with-monocular-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870316.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870316-supp.pdf
+to-scene-a-large-scale-dataset-for-understanding-3d-tabletop-scenes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870334.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870334-supp.pdf
+is-it-necessary-to-transfer-temporal-knowledge-for-domain-adaptive-video-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870351.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870351-supp.zip
+meta-spatio-temporal-debiasing-for-video-scene-graph-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870368.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870368-supp.pdf
+improving-the-reliability-for-confidence-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870385.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870385-supp.pdf
+fine-grained-scene-graph-generation-with-data-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870402.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870402-supp.pdf
+pose2room-understanding-3d-scenes-from-human-activities,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870418.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870418-supp.zip
+towards-hard-positive-query-mining-for-detr-based-human-object-interaction-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870437.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870437-supp.pdf
+discovering-human-object-interaction-concepts-via-self-compositional-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870454.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870454-supp.pdf
+primitive-based-shape-abstraction-via-nonparametric-bayesian-inference,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870472.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870472-supp.pdf
+stereo-depth-estimation-with-echoes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870489.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870489-supp.pdf
+inverted-pyramid-multi-task-transformer-for-dense-scene-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870506.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870506-supp.pdf
+petr-position-embedding-transformation-for-multi-view-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870523.pdf,
+s2net-stochastic-sequential-pointcloud-forecasting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870541.pdf,
+ra-depth-resolution-adaptive-self-supervised-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870557.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870557-supp.pdf
+polyphonicformer-unified-query-learning-for-depth-aware-video-panoptic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870574.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870574-supp.pdf
+sqn-weakly-supervised-semantic-segmentation-of-large-scale-3d-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870592.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870592-supp.pdf
+pointmixer-mlp-mixer-for-point-cloud-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870611.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870611-supp.pdf
+initialization-and-alignment-for-adversarial-texture-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870631.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870631-supp.pdf
+motr-end-to-end-multiple-object-tracking-with-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870648.pdf,
+gala-toward-geometry-and-lighting-aware-object-search-for-compositing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870665.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870665-supp.pdf
+lalaloc-global-floor-plan-comprehension-for-layout-localisation-in-unvisited-environments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870681.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870681-supp.pdf
+3d-pl-domain-adaptive-depth-estimation-with-3d-aware-pseudo-labeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870698.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870698-supp.pdf
+panoptic-partformer-learning-a-unified-model-for-panoptic-part-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870716.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136870716-supp.pdf
+salient-object-detection-for-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880001.pdf,
+learning-semantic-segmentation-from-multiple-datasets-with-label-shifts,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880019-supp.pdf
+weakly-supervised-3d-scene-segmentation-with-region-level-boundary-awareness-and-instance-discrimination,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880036-supp.pdf
+towards-open-vocabulary-scene-graph-generation-with-prompt-based-finetuning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880055.pdf,
+variance-aware-weight-initialization-for-point-convolutional-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880073.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880073-supp.pdf
+break-and-make-interactive-structural-understanding-using-lego-bricks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880089-supp.zip
+bi-pointflownet-bidirectional-learning-for-point-cloud-based-scene-flow-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880107.pdf,
+3dg-stfm-3d-geometric-guided-student-teacher-feature-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880124.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880124-supp.zip
+video-restoration-framework-and-its-meta-adaptations-to-data-poor-conditions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880142-supp.pdf
+monteboxfinder-detecting-and-filtering-primitives-to-fit-a-noisy-point-cloud,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880160.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880160-supp.zip
+scene-text-recognition-with-permuted-autoregressive-sequence-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880177-supp.pdf
+when-counting-meets-hmer-counting-aware-network-for-handwritten-mathematical-expression-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880197.pdf,
+detecting-tampered-scene-text-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880214.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880214-supp.pdf
+optimal-boxes-boosting-end-to-end-scene-text-recognition-by-adjusting-annotated-bounding-boxes-via-reinforcement-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880231.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880231-supp.pdf
+glass-global-to-local-attention-for-scene-text-spotting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880248.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880248-supp.pdf
+coo-comic-onomatopoeia-dataset-for-recognizing-arbitrary-or-truncated-texts,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880265.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880265-supp.pdf
+language-matters-a-weakly-supervised-vision-language-pre-training-approach-for-scene-text-detection-and-spotting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880282.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880282-supp.pdf
+toward-understanding-wordart-corner-guided-transformer-for-scene-text-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880301.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880301-supp.pdf
+levenshtein-ocr,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880319.pdf,
+multi-granularity-prediction-for-scene-text-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880336.pdf,
+dynamic-low-resolution-distillation-for-cost-efficient-end-to-end-text-spotting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880353.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880353-supp.pdf
+contextual-text-block-detection-towards-scene-text-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880371.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880371-supp.pdf
+comer-modeling-coverage-for-transformer-based-handwritten-mathematical-expression-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880389.pdf,
+dont-forget-me-accurate-background-recovery-for-text-removal-via-modeling-local-global-context,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880406.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880406-supp.pdf
+textadain-paying-attention-to-shortcut-learning-in-text-recognizers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880423-supp.pdf
+multi-modal-text-recognition-networks-interactive-enhancements-between-visual-and-semantic-features,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880442.pdf,
+sgbanet-semantic-gan-and-balanced-attention-network-for-arbitrarily-oriented-scene-text-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880459.pdf,
+pure-transformer-with-integrated-experts-for-scene-text-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880476.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880476-supp.pdf
+ocr-free-document-understanding-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880493.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880493-supp.pdf
+car-class-aware-regularizations-for-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880514.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880514-supp.pdf
+style-hallucinated-dual-consistency-learning-for-domain-generalized-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880530.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880530-supp.pdf
+seqformer-sequential-transformer-for-video-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880547.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880547-supp.pdf
+saliency-hierarchy-modeling-via-generative-kernels-for-salient-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880564.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880564-supp.pdf
+in-defense-of-online-models-for-video-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880582.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880582-supp.pdf
+active-pointly-supervised-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880599.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880599-supp.pdf
+a-transformer-based-decoder-for-semantic-segmentation-with-multi-level-context-mining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880617.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880617-supp.pdf
+xmem-long-term-video-object-segmentation-with-an-atkinson-shiffrin-memory-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880633.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880633-supp.pdf
+self-distillation-for-robust-lidar-semantic-segmentation-in-autonomous-driving,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880650.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880650-supp.pdf
+2dpass-2d-priors-assisted-semantic-segmentation-on-lidar-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880668.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880668-supp.pdf
+extract-free-dense-labels-from-clip,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880687.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880687-supp.pdf
+3d-compositional-zero-shot-learning-with-decompositional-consensus,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880704.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880704-supp.pdf
+video-mask-transfiner-for-high-quality-video-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880721.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136880721-supp.pdf
+box-supervised-instance-segmentation-with-level-set-evolution,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890001.pdf,
+point-primitive-transformer-for-long-term-4d-point-cloud-video-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890018.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890018-supp.pdf
+adaptive-agent-transformer-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890035.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890035-supp.zip
+waymo-open-dataset-panoramic-video-panoptic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890052.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890052-supp.zip
+transfgu-a-top-down-approach-to-fine-grained-unsupervised-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890072-supp.pdf
+adaafford-learning-to-adapt-manipulation-affordance-for-3d-articulated-objects-via-few-shot-interactions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890089-supp.zip
+cost-aggregation-with-4d-convolutional-swin-transformer-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890106.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890106-supp.pdf
+fine-grained-egocentric-hand-object-segmentation-dataset-model-and-applications,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890125.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890125-supp.zip
+perceptual-artifacts-localization-for-inpainting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890145.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890145-supp.pdf
+2d-amodal-instance-segmentation-guided-by-3d-shape-prior,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890164.pdf,
+data-efficient-3d-learner-via-knowledge-transferred-from-2d-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890181.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890181-supp.pdf
+adaptive-spatial-bce-loss-for-weakly-supervised-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890198.pdf,
+dense-gaussian-processes-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890215.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890215-supp.pdf
+3d-instances-as-1d-kernels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890233.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890233-supp.pdf
+transmatting-enhancing-transparent-objects-matting-with-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890250.pdf,
+mvsalnet-multi-view-augmentation-for-rgb-d-salient-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890268.pdf,
+k-means-mask-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890286.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890286-supp.pdf
+segpgd-an-effective-and-efficient-adversarial-attack-for-evaluating-and-boosting-segmentation-robustness,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890306.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890306-supp.pdf
+adversarial-erasing-framework-via-triplet-with-gated-pyramid-pooling-layer-for-weakly-supervised-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890323.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890323-supp.pdf
+continual-semantic-segmentation-via-structure-preserving-and-projected-feature-alignment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890341.pdf,
+interclass-prototype-relation-for-few-shot-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890358.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890358-supp.pdf
+slim-scissors-segmenting-thin-object-from-synthetic-background,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890375.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890375-supp.pdf
+abstracting-sketches-through-simple-primitives,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890392.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890392-supp.pdf
+multi-scale-and-cross-scale-contrastive-learning-for-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890408.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890408-supp.pdf
+one-trimap-video-matting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890426.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890426-supp.pdf
+d2ada-dynamic-density-aware-active-domain-adaptation-for-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890443.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890443-supp.pdf
+learning-quality-aware-dynamic-memory-for-video-object-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890462.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890462-supp.pdf
+learning-implicit-feature-alignment-function-for-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890479.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890479-supp.pdf
+quantum-motion-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890497.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890497-supp.pdf
+instance-as-identity-a-generic-online-paradigm-for-video-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890515.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890515-supp.zip
+laplacian-mesh-transformer-dual-attention-and-topology-aware-network-for-3d-mesh-classification-and-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890532.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890532-supp.pdf
+geodesic-former-a-geodesic-guided-few-shot-3d-point-cloud-instance-segmenter,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890552.pdf,
+union-set-multi-source-model-adaptation-for-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890570.pdf,
+point-mixswap-attentional-point-cloud-mixing-via-swapping-matched-structural-divisions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890587.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890587-supp.zip
+batman-bilateral-attention-transformer-in-motion-appearance-neighboring-space-for-video-object-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890603.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890603-supp.pdf
+spsn-superpixel-prototype-sampling-network-for-rgb-d-salient-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890621.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890621-supp.pdf
+global-spectral-filter-memory-network-for-video-object-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890639.pdf,
+video-instance-segmentation-via-multi-scale-spatio-temporal-split-attention-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890657.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890657-supp.pdf
+rankseg-adaptive-pixel-classification-with-image-category-ranking-for-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890673.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890673-supp.pdf
+learning-topological-interactions-for-multi-class-medical-image-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890691.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890691-supp.pdf
+unsupervised-segmentation-in-real-world-images-via-spelke-object-inference,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890708.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890708-supp.pdf
+a-simple-baseline-for-open-vocabulary-semantic-segmentation-with-pre-trained-vision-language-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890725.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136890725-supp.pdf
+fast-two-view-motion-segmentation-using-christoffel-polynomials,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900001-supp.pdf
+uctnet-uncertainty-aware-cross-modal-transformer-network-for-indoor-rgb-d-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900020.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900020-supp.pdf
+bi-directional-contrastive-learning-for-domain-adaptive-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900038.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900038-supp.pdf
+learning-regional-purity-for-instance-segmentation-on-3d-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900055.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900055-supp.pdf
+cross-domain-few-shot-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900072-supp.pdf
+generative-subgraph-contrast-for-self-supervised-graph-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900090.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900090-supp.pdf
+sdae-self-distillated-masked-autoencoder,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900107.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900107-supp.pdf
+demystifying-unsupervised-semantic-correspondence-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900124.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900124-supp.pdf
+open-set-semi-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900142.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900142-supp.pdf
+vibration-based-uncertainty-estimation-for-learning-from-limited-supervision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900160.pdf,
+concurrent-subsidiary-supervision-for-unsupervised-source-free-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900177-supp.pdf
+weakly-supervised-object-localization-through-inter-class-feature-similarity-and-intra-class-appearance-consistency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900194.pdf,
+active-learning-strategies-for-weakly-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900210.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900210-supp.pdf
+mc-beit-multi-choice-discretization-for-image-bert-pre-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900229-supp.pdf
+bootstrapped-masked-autoencoders-for-vision-bert-pretraining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900246.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900246-supp.pdf
+unsupervised-visual-representation-learning-by-synchronous-momentum-grouping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900264.pdf,
+improving-few-shot-part-segmentation-using-coarse-supervision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900282.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900282-supp.pdf
+what-to-hide-from-your-students-attention-guided-masked-image-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900299.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900299-supp.pdf
+pointly-supervised-panoptic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900318.pdf,
+mvp-multimodality-guided-visual-pre-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900336.pdf,
+locally-varying-distance-transform-for-unsupervised-visual-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900353.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900353-supp.pdf
+hrda-context-aware-high-resolution-domain-adaptive-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900370.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900370-supp.pdf
+spot-the-difference-self-supervised-pre-training-for-anomaly-detection-and-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900389.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900389-supp.pdf
+dual-domain-self-supervised-learning-and-model-adaption-for-deep-compressive-imaging,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900406.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900406-supp.pdf
+unsupervised-selective-labeling-for-more-effective-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900423-supp.pdf
+max-pooling-with-vision-transformers-reconciles-class-and-shape-in-weakly-supervised-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900442.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900442-supp.pdf
+dense-siamese-network-for-dense-unsupervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900460.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900460-supp.pdf
+multi-granularity-distillation-scheme-towards-lightweight-semi-supervised-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900477.pdf,
+cp2-copy-paste-contrastive-pretraining-for-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900494.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900494-supp.pdf
+self-filtering-a-noise-aware-sample-selection-for-label-noise-with-confidence-penalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900511.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900511-supp.pdf
+rda-reciprocal-distribution-alignment-for-robust-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900527.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900527-supp.pdf
+memsac-memory-augmented-sample-consistency-for-large-scale-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900543.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900543-supp.pdf
+united-defocus-blur-detection-and-deblurring-via-adversarial-promoting-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900562.pdf,
+synergistic-self-supervised-and-quantization-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900579.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900579-supp.pdf
+semi-supervised-vision-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900596.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900596-supp.pdf
+domain-adaptive-video-segmentation-via-temporal-pseudo-supervision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900612.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900612-supp.pdf
+diverse-learner-exploring-diverse-supervision-for-semi-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900631.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900631-supp.pdf
+a-closer-look-at-invariances-in-self-supervised-pre-training-for-3d-vision,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900647.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900647-supp.pdf
+conmatch-semi-supervised-learning-with-confidence-guided-consistency-regularization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900665.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900665-supp.pdf
+fedx-unsupervised-federated-learning-with-cross-knowledge-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900682.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900682-supp.pdf
+w2n-switching-from-weak-supervision-to-noisy-supervision-for-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900699.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900699-supp.pdf
+decoupled-adversarial-contrastive-learning-for-self-supervised-adversarial-robustness,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900716.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136900716-supp.pdf
+goca-guided-online-cluster-assignment-for-self-supervised-video-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910001.pdf,
+constrained-mean-shift-using-distant-yet-related-neighbors-for-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910021.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910021-supp.pdf
+revisiting-the-critical-factors-of-augmentation-invariant-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910040.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910040-supp.pdf
+ca-ssl-class-agnostic-semi-supervised-learning-for-detection-and-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910057.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910057-supp.pdf
+dual-adaptive-transformations-for-weakly-supervised-point-cloud-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910075.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910075-supp.pdf
+semantic-aware-fine-grained-correspondence,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910093.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910093-supp.zip
+self-supervised-classification-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910112.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910112-supp.pdf
+data-invariants-to-understand-unsupervised-out-of-distribution-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910129.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910129-supp.pdf
+domain-invariant-masked-autoencoders-for-self-supervised-learning-from-multi-domains,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910147.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910147-supp.pdf
+semi-supervised-object-detection-via-virtual-category-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910164.pdf,
+completely-self-supervised-crowd-counting-via-distribution-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910180.pdf,
+coarse-to-fine-incremental-few-shot-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910199.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910199-supp.pdf
+learning-unbiased-transferability-for-domain-adaptation-by-uncertainty-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910216.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910216-supp.pdf
+learn2augment-learning-to-composite-videos-for-data-augmentation-in-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910234.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910234-supp.pdf
+cyborgs-contrastively-bootstrapping-object-representations-by-grounding-in-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910251.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910251-supp.pdf
+pss-progressive-sample-selection-for-open-world-visual-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910269.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910269-supp.pdf
+improving-self-supervised-lightweight-model-learning-via-hard-aware-metric-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910286.pdf,
+object-discovery-via-contrastive-learning-for-weakly-supervised-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910302.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910302-supp.pdf
+stochastic-consensus-enhancing-semi-supervised-learning-with-consistency-of-stochastic-classifiers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910319.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910319-supp.pdf
+diffusemorph-unsupervised-deformable-image-registration-using-diffusion-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910336.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910336-supp.pdf
+semi-leak-membership-inference-attacks-against-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910353.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910353-supp.pdf
+openldn-learning-to-discover-novel-classes-for-open-world-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910370.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910370-supp.pdf
+embedding-contrastive-unsupervised-features-to-cluster-in-and-out-of-distribution-noise-in-corrupted-image-datasets,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910389.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910389-supp.pdf
+unsupervised-few-shot-image-classification-by-learning-features-into-clustering-space,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910406.pdf,
+towards-realistic-semi-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910423.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910423-supp.pdf
+masked-siamese-networks-for-label-efficient-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910442.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910442-supp.pdf
+natural-synthetic-anomalies-for-self-supervised-anomaly-detection-and-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910459.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910459-supp.pdf
+understanding-collapse-in-non-contrastive-siamese-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910476.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910476-supp.pdf
+federated-self-supervised-learning-for-video-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910492.pdf,
+towards-efficient-and-effective-self-supervised-learning-of-visual-representations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910509.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910509-supp.pdf
+dsr-a-dual-subspace-re-projection-network-for-surface-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910526.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910526-supp.pdf
+pseudoaugment-learning-to-use-unlabeled-data-for-data-augmentation-in-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910542.pdf,
+mvster-epipolar-transformer-for-efficient-multi-view-stereo,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910561.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910561-supp.pdf
+relpose-predicting-probabilistic-relative-rotation-for-single-objects-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910580.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910580-supp.pdf
+r2l-distilling-neural-radiance-field-to-neural-light-field-for-efficient-novel-view-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910598.pdf,
+kd-mvs-knowledge-distillation-based-self-supervised-learning-for-multi-view-stereo,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910615.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910615-supp.pdf
+salve-semantic-alignment-verification-for-floorplan-reconstruction-from-sparse-panoramas,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910632.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910632-supp.pdf
+rc-mvsnet-unsupervised-multi-view-stereo-with-neural-rendering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910649.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910649-supp.zip
+box2mask-weakly-supervised-3d-semantic-instance-segmentation-using-bounding-boxes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910666.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910666-supp.pdf
+neilf-neural-incident-light-field-for-physically-based-material-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910684.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910684-supp.zip
+arf-artistic-radiance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910701.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910701-supp.pdf
+multiview-stereo-with-cascaded-epipolar-raft,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910718.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136910718-supp.pdf
+arah-animatable-volume-rendering-of-articulated-human-sdfs,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920001-supp.pdf
+aspanformer-detector-free-image-matching-with-adaptive-span-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920020.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920020-supp.pdf
+ndf-neural-deformable-fields-for-dynamic-human-modelling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920037-supp.pdf
+neural-density-distance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920053-supp.zip
+next-towards-high-quality-neural-radiance-fields-via-multi-skip-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920069.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920069-supp.pdf
+learning-online-multi-sensor-depth-fusion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920088.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920088-supp.pdf
+bungeenerf-progressive-neural-radiance-field-for-extreme-multi-scale-scene-rendering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920106.pdf,
+decomposing-the-tangent-of-occluding-boundaries-according-to-curvatures-and-torsions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920123.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920123-supp.pdf
+neuris-neural-reconstruction-of-indoor-scenes-using-normal-priors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920139.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920139-supp.pdf
+generalizable-patch-based-neural-rendering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920156.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920156-supp.pdf
+improving-rgb-d-point-cloud-registration-by-learning-multi-scale-local-linear-transformation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920175.pdf,
+real-time-neural-character-rendering-with-pose-guided-multiplane-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920192.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920192-supp.pdf
+sparseneus-fast-generalizable-neural-surface-reconstruction-from-sparse-views,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920210.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920210-supp.pdf
+disentangling-object-motion-and-occlusion-for-unsupervised-multi-frame-monocular-depth,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920228.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920228-supp.pdf
+depth-field-networks-for-generalizable-multi-view-scene-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920245.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920245-supp.zip
+context-enhanced-stereo-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920263.pdf,
+pcw-net-pyramid-combination-and-warping-cost-volume-for-stereo-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920280.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920280-supp.pdf
+gen6d-generalizable-model-free-6-dof-object-pose-estimation-from-rgb-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920297.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920297-supp.pdf
+latency-aware-collaborative-perception,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920315.pdf,
+tensorf-tensorial-radiance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920332.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920332-supp.pdf
+nefsac-neurally-filtered-minimal-samples,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920350.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920350-supp.pdf
+snes-learning-probably-symmetric-neural-surfaces-from-incomplete-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920366.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920366-supp.zip
+hdr-plenoxels-self-calibrating-high-dynamic-range-radiance-fields,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920383.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920383-supp.pdf
+neuman-neural-human-radiance-field-from-a-single-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920400.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920400-supp.zip
+tava-template-free-animatable-volumetric-actors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920417.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920417-supp.pdf
+easnet-searching-elastic-and-accurate-network-architecture-for-stereo-matching,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920434.pdf,
+relative-pose-from-sift-features,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920451.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920451-supp.zip
+selection-and-cross-similarity-for-event-image-deep-stereo,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920467.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920467-supp.pdf
+d3net-a-unified-speaker-listener-architecture-for-3d-dense-captioning-and-visual-grounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920484.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920484-supp.pdf
+circle-convolutional-implicit-reconstruction-and-completion-for-large-scale-indoor-scene,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920502.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920502-supp.pdf
+particlesfm-exploiting-dense-point-trajectories-for-localizing-moving-cameras-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920519.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920519-supp.pdf
+4dcontrast-contrastive-learning-with-dynamic-correspondences-for-3d-scene-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920539.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920539-supp.pdf
+few-zero-level-set-shot-learning-of-shape-signed-distance-functions-in-feature-space,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920556.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920556-supp.pdf
+solution-space-analysis-of-essential-matrix-based-on-algebraic-error-minimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920574.pdf,
+approximate-differentiable-rendering-with-algebraic-surfaces,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920591.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920591-supp.pdf
+covispose-co-visibility-pose-transformer-for-wide-baseline-relative-pose-estimation-in-360deg-indoor-panoramas,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920610.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920610-supp.pdf
+affine-correspondences-between-multi-camera-systems-for-6dof-relative-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920629.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920629-supp.zip
+graphfit-learning-multi-scale-graph-convolutional-representation-for-point-cloud-normal-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920646.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920646-supp.pdf
+is-mvsnet-importance-sampling-based-mvsnet,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920663.pdf,
+point-scene-understanding-via-disentangled-instance-mesh-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920679.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920679-supp.pdf
+diffustereo-high-quality-human-reconstruction-via-diffusion-based-stereo-using-sparse-cameras,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920697.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920697-supp.pdf
+space-partitioning-ransac,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920715.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136920715-supp.zip
+simplerecon-3d-reconstruction-without-3d-convolutions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930001-supp.pdf
+structure-and-motion-from-casual-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930020.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930020-supp.pdf
+what-matters-for-3d-scene-flow-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930036.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930036-supp.pdf
+correspondence-reweighted-translation-averaging,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930053.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930053-supp.pdf
+neural-strands-learning-hair-geometry-and-appearance-from-multi-view-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930070-supp.zip
+graphcspn-geometry-aware-depth-completion-via-dynamic-gcns,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930087.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930087-supp.zip
+objects-can-move-3d-change-detection-by-geometric-transformation-consistency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930104.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930104-supp.pdf
+language-grounded-indoor-3d-semantic-segmentation-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930121.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930121-supp.zip
+beyond-periodicity-towards-a-unifying-framework-for-activations-in-coordinate-mlps,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930139.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930139-supp.pdf
+deforming-radiance-fields-with-cages,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930155.pdf,
+flex-extrinsic-parameters-free-multi-view-3d-human-motion-reconstruction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930172.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930172-supp.pdf
+mode-multi-view-omnidirectional-depth-estimation-with-360deg-cameras,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930192.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930192-supp.pdf
+gigadepth-learning-depth-from-structured-light-with-branching-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930209.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930209-supp.pdf
+activenerf-learning-where-to-see-with-uncertainty-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930225.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930225-supp.pdf
+posernet-refining-relative-camera-poses-exploiting-object-detections,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930242.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930242-supp.pdf
+gaussian-activated-neural-radiance-fields-for-high-fidelity-reconstruction-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930259.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930259-supp.pdf
+unbiased-gradient-estimation-for-differentiable-surface-splatting-via-poisson-sampling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930276.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930276-supp.pdf
+towards-learning-neural-representations-from-shadows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930295.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930295-supp.pdf
+class-incremental-novel-class-discovery,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930312.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930312-supp.pdf
+unknown-oriented-learning-for-open-set-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930328.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930328-supp.pdf
+prototype-guided-continual-adaptation-for-class-incremental-unsupervised-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930345.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930345-supp.pdf
+decouplenet-decoupled-network-for-domain-adaptive-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930362.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930362-supp.pdf
+class-agnostic-object-counting-robust-to-intraclass-diversity,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930380.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930380-supp.pdf
+burn-after-reading-online-adaptation-for-cross-domain-streaming-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930396.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930396-supp.pdf
+mind-the-gap-in-distilling-stylegans,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930416.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930416-supp.pdf
+improving-test-time-adaptation-via-shift-agnostic-weight-regularization-and-nearest-source-prototypes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930433.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930433-supp.pdf
+learning-instance-specific-adaptation-for-cross-domain-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930451.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930451-supp.pdf
+regioncl-exploring-contrastive-region-pairs-for-self-supervised-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930468.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930468-supp.pdf
+long-tailed-class-incremental-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930486.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930486-supp.pdf
+dlcft-deep-linear-continual-fine-tuning-for-general-incremental-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930503.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930503-supp.pdf
+adversarial-partial-domain-adaptation-by-cycle-inconsistency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930520.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930520-supp.pdf
+combating-label-distribution-shift-for-active-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930539.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930539-supp.pdf
+gipso-geometrically-informed-propagation-for-online-adaptation-in-3d-lidar-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930557.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930557-supp.pdf
+cosmix-compositional-semantic-mix-for-domain-adaptation-in-3d-lidar-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930575.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930575-supp.pdf
+a-unified-framework-for-domain-adaptive-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930592.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930592-supp.pdf
+a-broad-study-of-pre-training-for-domain-generalization-and-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930609.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930609-supp.pdf
+prior-knowledge-guided-unsupervised-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930628.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930628-supp.pdf
+gcisg-guided-causal-invariant-learning-for-improved-syn-to-real-generalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930644.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930644-supp.pdf
+acrofod-an-adaptive-method-for-cross-domain-few-shot-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930661.pdf,
+unsupervised-domain-adaptation-for-one-stage-object-detector-using-offsets-to-bounding-box,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930679.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930679-supp.pdf
+visual-prompt-tuning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930696.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930696-supp.pdf
+quasi-balanced-self-training-on-noise-aware-synthesis-of-object-point-clouds-for-closing-domain-gap,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930715.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136930715-supp.pdf
+interpretable-open-set-domain-adaptation-via-angular-margin-separation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940001-supp.pdf
+tacs-taxonomy-adaptive-cross-domain-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940019-supp.pdf
+prototypical-contrast-adaptation-for-domain-adaptive-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940036.pdf,
+rbc-rectifying-the-biased-context-in-continual-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940054.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940054-supp.pdf
+factorizing-knowledge-in-neural-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940072.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940072-supp.pdf
+contrastive-vicinal-space-for-unsupervised-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940090.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940090-supp.pdf
+cross-modal-knowledge-transfer-without-task-relevant-source-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940108.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940108-supp.pdf
+online-domain-adaptation-for-semantic-segmentation-in-ever-changing-conditions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940125.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940125-supp.pdf
+source-free-video-domain-adaptation-by-learning-temporal-consistency-for-action-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940144.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940144-supp.pdf
+bmd-a-general-class-balanced-multicentric-dynamic-prototype-strategy-for-source-free-domain-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940161.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940161-supp.pdf
+generalized-brain-image-synthesis-with-transferable-convolutional-sparse-coding-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940178.pdf,
+incomplete-multi-view-domain-adaptation-via-channel-enhancement-and-knowledge-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940194.pdf,
+distpro-searching-a-fast-knowledge-distillation-process-via-meta-optimization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940211.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940211-supp.pdf
+ml-bpm-multi-teacher-learning-with-bidirectional-photometric-mixing-for-open-compound-domain-adaptation-in-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940228.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940228-supp.pdf
+pactran-pac-bayesian-metrics-for-estimating-the-transferability-of-pretrained-models-to-classification-tasks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940244.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940244-supp.pdf
+personalized-education-blind-knowledge-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940262.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940262-supp.pdf
+not-all-models-are-equal-predicting-model-transferability-in-a-self-challenging-fisher-space,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940279.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940279-supp.pdf
+how-stable-are-transferability-metrics-evaluations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940296.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940296-supp.pdf
+attention-diversification-for-domain-generalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940315.pdf,
+ess-learning-event-based-semantic-segmentation-from-still-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940334.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940334-supp.pdf
+an-efficient-spatio-temporal-pyramid-transformer-for-action-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940350.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940350-supp.pdf
+human-trajectory-prediction-via-neural-social-physics,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940368.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940368-supp.pdf
+towards-open-set-video-anomaly-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940387.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940387-supp.pdf
+eclipse-efficient-long-range-video-retrieval-using-sight-and-sound,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940405.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940405-supp.zip
+joint-modal-label-denoising-for-weakly-supervised-audio-visual-video-parsing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940424.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940424-supp.pdf
+less-than-few-self-shot-video-instance-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940442.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940442-supp.pdf
+adaptive-face-forgery-detection-in-cross-domain,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940460.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940460-supp.pdf
+real-time-online-video-detection-with-temporal-smoothing-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940478.pdf,
+tallformer-temporal-action-localization-with-a-long-memory-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940495.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940495-supp.pdf
+mining-relations-among-cross-frame-affinities-for-video-semantic-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940513.pdf,
+tl-dw-summarizing-instructional-videos-with-task-relevance-cross-modal-saliency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940530.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940530-supp.pdf
+rethinking-learning-approaches-for-long-term-action-anticipation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940547.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940547-supp.zip
+dualformer-local-global-stratified-transformer-for-efficient-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940566.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940566-supp.pdf
+hierarchical-feature-alignment-network-for-unsupervised-video-object-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940584.pdf,
+pac-net-highlight-your-video-via-history-preference-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940602.pdf,
+how-severe-is-benchmark-sensitivity-in-video-self-supervised-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940620.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940620-supp.pdf
+a-sliding-window-scheme-for-online-temporal-action-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940640.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940640-supp.pdf
+era-expert-retrieval-and-assembly-for-early-action-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940657.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940657-supp.pdf
+dual-perspective-network-for-audio-visual-event-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940676.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940676-supp.pdf
+nsnet-non-saliency-suppression-sampler-for-efficient-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940692.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940692-supp.pdf
+video-activity-localisation-with-uncertainties-in-temporal-boundary,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940710.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940710-supp.pdf
+temporal-saliency-query-network-for-efficient-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940727.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136940727-supp.pdf
+efficient-one-stage-video-object-detection-by-exploiting-temporal-consistency,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950001-supp.pdf
+leveraging-action-affinity-and-continuity-for-semi-supervised-temporal-action-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950017.pdf,
+spotting-temporally-precise-fine-grained-events-in-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950033.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950033-supp.pdf
+unified-fully-and-timestamp-supervised-temporal-action-segmentation-via-sequence-to-sequence-translation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950052.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950052-supp.pdf
+efficient-video-transformers-with-spatial-temporal-token-selection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950068.pdf,
+long-movie-clip-classification-with-state-space-video-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950086.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950086-supp.pdf
+prompting-visual-language-models-for-efficient-video-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950104.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950104-supp.zip
+asymmetric-relation-consistency-reasoning-for-video-relation-grounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950124.pdf,
+self-supervised-social-relation-representation-for-human-group-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950140.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950140-supp.pdf
+k-centered-patch-sampling-for-efficient-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950157.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950157-supp.pdf
+a-deep-moving-camera-background-model,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950175.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950175-supp.zip
+graphvid-it-only-takes-a-few-nodes-to-understand-a-video,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950192.pdf,
+delta-distillation-for-efficient-video-processing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950209.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950209-supp.pdf
+morphmlp-an-efficient-mlp-like-backbone-for-spatial-temporal-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950226.pdf,
+composer-compositional-reasoning-of-group-activity-in-videos-with-keypoint-only-modality,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950245.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950245-supp.pdf
+e-nerv-expedite-neural-video-representation-with-disentangled-spatial-temporal-context,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950263.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950263-supp.pdf
+tdvit-temporal-dilated-video-transformer-for-dense-video-tasks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950281.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950281-supp.pdf
+semi-supervised-learning-of-optical-flow-by-flow-supervisor,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950298.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950298-supp.pdf
+flow-graph-to-video-grounding-for-weakly-supervised-multi-step-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950315.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950315-supp.pdf
+deep-360deg-optical-flow-estimation-based-on-multi-projection-fusion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950332.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950332-supp.zip
+maclr-motion-aware-contrastive-learning-of-representations-for-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950349.pdf,
+learning-long-term-spatial-temporal-graphs-for-active-speaker-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950367.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950367-supp.zip
+frozen-clip-models-are-efficient-video-learners,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950384.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950384-supp.pdf
+pip-physical-interaction-prediction-via-mental-simulation-with-span-selection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950401.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950401-supp.pdf
+panoramic-vision-transformer-for-saliency-detection-in-360deg-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950419.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950419-supp.pdf
+bayesian-tracking-of-video-graphs-using-joint-kalman-smoothing-and-registration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950436.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950436-supp.zip
+motion-sensitive-contrastive-learning-for-self-supervised-video-representation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950453.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950453-supp.pdf
+dynamic-temporal-filtering-in-video-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950470.pdf,
+tip-adapter-training-free-adaption-of-clip-for-few-shot-classification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950487.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950487-supp.pdf
+temporal-lift-pooling-for-continuous-sign-language-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950506.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950506-supp.pdf
+more-multi-order-relation-mining-for-dense-captioning-in-3d-scenes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950523.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950523-supp.pdf
+siri-a-simple-selective-retraining-mechanism-for-transformer-based-visual-grounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950541.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950541-supp.pdf
+cross-modal-prototype-driven-network-for-radiology-report-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950558.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950558-supp.pdf
+tm2t-stochastic-and-tokenized-modeling-for-the-reciprocal-generation-of-3d-human-motions-and-texts,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950575.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950575-supp.pdf
+seqtr-a-simple-yet-universal-network-for-visual-grounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950593.pdf,
+vtc-improving-video-text-retrieval-with-user-comments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950611.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950611-supp.pdf
+fashionvil-fashion-focused-vision-and-language-representation-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950629.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950629-supp.pdf
+weakly-supervised-grounding-for-vqa-in-vision-language-transformers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950647.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950647-supp.pdf
+automatic-dense-annotation-of-large-vocabulary-sign-language-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950666.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950666-supp.pdf
+miles-visual-bert-pre-training-with-injected-language-semantics-for-video-text-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950685.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950685-supp.pdf
+geb-a-benchmark-for-generic-event-boundary-captioning-grounding-and-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950703.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950703-supp.pdf
+a-simple-and-robust-correlation-filtering-method-for-text-based-person-search,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136950719.pdf,
+making-the-most-of-text-semantics-to-improve-biomedical-vision-language-processing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960001-supp.pdf
+generative-negative-text-replay-for-continual-vision-language-pretraining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960022.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960022-supp.pdf
+video-graph-transformer-for-video-question-answering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960039.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960039-supp.pdf
+trace-controlled-text-to-image-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960058.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960058-supp.pdf
+video-question-answering-with-iterative-video-text-co-tokenization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960075.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960075-supp.pdf
+rethinking-data-augmentation-for-robust-visual-question-answering,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960094.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960094-supp.pdf
+explicit-image-caption-editing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960111.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960111-supp.pdf
+can-shuffling-video-benefit-temporal-bias-problem-a-novel-training-framework-for-temporal-grounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960128.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960128-supp.pdf
+reliable-visual-question-answering-abstain-rather-than-answer-incorrectly,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960146.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960146-supp.pdf
+grit-faster-and-better-image-captioning-transformer-using-dual-visual-features,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960165.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960165-supp.pdf
+selective-query-guided-debiasing-for-video-corpus-moment-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960183.pdf,
+spatial-and-visual-perspective-taking-via-view-rotation-and-relation-reasoning-for-embodied-reference-understanding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960199.pdf,
+object-centric-unsupervised-image-captioning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960217.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960217-supp.pdf
+contrastive-vision-language-pre-training-with-limited-resources,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960234.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960234-supp.pdf
+learning-linguistic-association-towards-efficient-text-video-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960251.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960251-supp.pdf
+assister-assistive-navigation-via-conditional-instruction-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960269.pdf,
+x-detr-a-versatile-architecture-for-instance-wise-vision-language-tasks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960288.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960288-supp.pdf
+learning-disentanglement-with-decoupled-labels-for-vision-language-navigation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960305.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960305-supp.pdf
+switch-bert-learning-to-model-multimodal-interactions-by-switching-attention-and-input,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960325.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960325-supp.pdf
+word-level-fine-grained-story-visualization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960342.pdf,
+unifying-event-detection-and-captioning-as-sequence-generation-via-pre-training,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960358.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960358-supp.pdf
+multimodal-transformer-with-variable-length-memory-for-vision-and-language-navigation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960375.pdf,
+fine-grained-visual-entailment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960393.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960393-supp.pdf
+bottom-up-top-down-detection-transformers-for-language-grounding-in-images-and-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960411.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960411-supp.pdf
+new-datasets-and-models-for-contextual-reasoning-in-visual-dialog,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960428.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960428-supp.pdf
+visagesyntalk-unseen-speaker-video-to-speech-synthesis-via-speech-visage-feature-selection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960445.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960445-supp.zip
+classification-regression-for-chart-comprehension,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960462.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960462-supp.pdf
+assistq-affordance-centric-question-driven-task-completion-for-egocentric-assistant,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960478.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960478-supp.pdf
+findit-generalized-localization-with-natural-language-queries,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960495.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960495-supp.pdf
+unitab-unifying-text-and-box-outputs-for-grounded-vision-language-modeling,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960514.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960514-supp.pdf
+scaling-open-vocabulary-image-segmentation-with-image-level-labels,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960532.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960532-supp.pdf
+the-abduction-of-sherlock-holmes-a-dataset-for-visual-abductive-reasoning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960549.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960549-supp.pdf
+speaker-adaptive-lip-reading-with-user-dependent-padding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960567.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960567-supp.pdf
+tise-bag-of-metrics-for-text-to-image-synthesis-evaluation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960585.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960585-supp.pdf
+semaug-semantically-meaningful-image-augmentations-for-object-detection-through-language-grounding,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960602.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960602-supp.pdf
+referring-object-manipulation-of-natural-images-with-conditional-classifier-free-guidance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960619.pdf,
+newsstories-illustrating-articles-with-visual-summaries,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960636.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960636-supp.pdf
+webly-supervised-concept-expansion-for-general-purpose-vision-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960654.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960654-supp.pdf
+fedvln-privacy-preserving-federated-vision-and-language-navigation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960673.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960673-supp.pdf
+coder-coupled-diversity-sensitive-momentum-contrastive-learning-for-image-text-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960691.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960691-supp.pdf
+language-driven-artistic-style-transfer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960708.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960708-supp.pdf
+single-stream-multi-level-alignment-for-vision-language-pretraining,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960725.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136960725-supp.pdf
+most-and-least-retrievable-images-in-visual-language-query-systems,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970001-supp.pdf
+sports-video-analysis-on-large-scale-data,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970019.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970019-supp.pdf
+grounding-visual-representations-with-texts-for-domain-generalization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970037.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970037-supp.pdf
+bridging-the-visual-semantic-gap-in-vln-via-semantically-richer-instructions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970054.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970054-supp.pdf
+storydall-e-adapting-pretrained-text-to-image-transformers-for-story-continuation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970070-supp.pdf
+vqgan-clip-open-domain-image-generation-and-editing-with-natural-language-guidance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970088.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970088-supp.pdf
+semantic-aware-implicit-neural-audio-driven-video-portrait-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970105.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970105-supp.pdf
+end-to-end-active-speaker-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970124.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970124-supp.pdf
+emotion-recognition-for-multiple-context-awareness,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970141.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970141-supp.pdf
+adaptive-fine-grained-sketch-based-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970160.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970160-supp.pdf
+quantized-gan-for-complex-music-generation-from-dance-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970177.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970177-supp.pdf
+uncertainty-aware-multi-modal-learning-via-cross-modal-random-network-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970195.pdf,
+localizing-visual-sounds-the-easy-way,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970212.pdf,
+learning-visual-styles-from-audio-visual-associations,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970229-supp.pdf
+remote-respiration-monitoring-of-moving-person-using-radio-signals,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970248.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970248-supp.pdf
+camera-pose-estimation-and-localization-with-active-audio-sensing,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970266.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970266-supp.pdf
+pacs-a-dataset-for-physical-audiovisual-commonsense-reasoning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970286.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970286-supp.zip
+vovit-low-latency-graph-based-audio-visual-voice-separation-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970304.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970304-supp.zip
+telepresence-video-quality-assessment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970321.pdf,
+multimae-multi-modal-multi-task-masked-autoencoders,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970341.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970341-supp.zip
+audioscopev2-audio-visual-attention-architectures-for-calibrated-open-domain-on-screen-sound-separation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970360.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970360-supp.pdf
+audio-visual-segmentation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970378.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970378-supp.pdf
+unsupervised-night-image-enhancement-when-layer-decomposition-meets-light-effects-suppression,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970396.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970396-supp.pdf
+relationformer-a-unified-framework-for-image-to-graph-generation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970414.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970414-supp.pdf
+gama-cross-view-video-geo-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970432.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970432-supp.pdf
+revisiting-a-knn-based-image-classification-system-with-high-capacity-storage,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970449.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970449-supp.pdf
+geometric-representation-learning-for-document-image-rectification,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970466.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970466-supp.pdf
+s2-ver-semi-supervised-visual-emotion-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970483.pdf,
+image-coding-for-machines-with-omnipotent-feature-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970500.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970500-supp.pdf
+feature-representation-learning-for-unsupervised-cross-domain-image-retrieval,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970518.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970518-supp.pdf
+fashionformer-a-simple-effective-and-unified-baseline-for-human-fashion-segmentation-and-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970534.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970534-supp.pdf
+semantic-guided-multi-mask-image-harmonization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970552.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970552-supp.pdf
+learning-an-isometric-surface-parameterization-for-texture-unwrapping,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970568.pdf,
+towards-regression-free-neural-networks-for-diverse-compute-platforms,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970587.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970587-supp.pdf
+relationship-spatialization-for-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970603.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970603-supp.pdf
+image2point-3d-point-cloud-understanding-with-2d-image-pretrained-models,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970625.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970625-supp.pdf
+far-fourier-aerial-video-recognition,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970644.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970644-supp.zip
+translating-a-visual-lego-manual-to-a-machine-executable-plan,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970663.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970663-supp.pdf
+fabric-material-recovery-from-video-using-multi-scale-geometric-auto-encoder,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970680.pdf,
+megba-a-gpu-based-distributed-library-for-large-scale-bundle-adjustment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970698.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970698-supp.pdf
+the-one-where-they-reconstructed-3d-humans-and-environments-in-tv-shows,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970714.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136970714-supp.pdf
+talisman-targeted-active-learning-for-object-detection-with-rare-classes-and-slices-using-submodular-mutual-information,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980001.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980001-supp.pdf
+an-efficient-person-clustering-algorithm-for-open-checkout-free-groceries,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980017.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980017-supp.zip
+pop-mining-potential-performance-of-new-fashion-products-via-webly-cross-modal-query-expansion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980034.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980034-supp.pdf
+pose-forecasting-in-industrial-human-robot-collaboration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980051.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980051-supp.pdf
+actor-centered-representations-for-action-localization-in-streaming-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980070-supp.zip
+bandwidth-aware-adaptive-codec-for-dnn-inference-offloading-in-iot,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980087.pdf,
+domain-knowledge-informed-self-supervised-representations-for-workout-form-assessment,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980104.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980104-supp.zip
+responsive-listening-head-generation-a-benchmark-dataset-and-baseline,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980122.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980122-supp.pdf
+towards-scale-aware-robust-and-generalizable-unsupervised-monocular-depth-estimation-by-integrating-imu-motion-dynamics,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980140.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980140-supp.pdf
+tips-text-induced-pose-synthesis,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980157.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980157-supp.pdf
+addressing-heterogeneity-in-federated-learning-via-distributional-transformation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980175.pdf,
+where-in-the-world-is-this-image-transformer-based-geo-localization-in-the-wild,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980193.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980193-supp.pdf
+colorization-for-in-situ-marine-plankton-images,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980212.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980212-supp.pdf
+efficient-deep-visual-and-inertial-odometry-with-adaptive-visual-modality-selection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980229.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980229-supp.pdf
+a-sketch-is-worth-a-thousand-words-image-retrieval-with-text-and-sketch,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980247.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980247-supp.pdf
+a-cloud-3d-dataset-and-application-specific-learned-image-compression-in-cloud-3d,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980265.pdf,
+autotransition-learning-to-recommend-video-transition-effects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980282.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980282-supp.zip
+online-segmentation-of-lidar-sequences-dataset-and-algorithm,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980298.pdf,
+open-world-semantic-segmentation-for-lidar-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980315.pdf,
+king-generating-safety-critical-driving-scenarios-for-robust-imitation-via-kinematics-gradients,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980332.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980332-supp.pdf
+differentiable-raycasting-for-self-supervised-occupancy-forecasting,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980349.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980349-supp.zip
+inaction-interpretable-action-decision-making-for-autonomous-driving,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980365.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980365-supp.pdf
+cramnet-camera-radar-fusion-with-ray-constrained-cross-attention-for-robust-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980382.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980382-supp.pdf
+coda-a-real-world-road-corner-case-dataset-for-object-detection-in-autonomous-driving,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980399.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980399-supp.pdf
+motion-inspired-unsupervised-perception-and-prediction-in-autonomous-driving,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980416.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980416-supp.pdf
+stretchbev-stretching-future-instance-prediction-spatially-and-temporally,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980436.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980436-supp.pdf
+rclane-relay-chain-prediction-for-lane-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980453.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980453-supp.pdf
+drive-segment-unsupervised-semantic-segmentation-of-urban-scenes-via-cross-modal-distillation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980469.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980469-supp.pdf
+centerformer-center-based-transformer-for-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980487.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980487-supp.pdf
+physical-attack-on-monocular-depth-estimation-with-optimal-adversarial-patches,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980504.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980504-supp.pdf
+st-p3-end-to-end-vision-based-autonomous-driving-via-spatial-temporal-feature-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980522.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980522-supp.pdf
+persformer-3d-lane-detection-via-perspective-transformer-and-the-openlane-benchmark,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980539.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980539-supp.pdf
+pointfix-learning-to-fix-domain-bias-for-robust-online-stereo-adaptation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980557.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980557-supp.zip
+brnet-exploring-comprehensive-features-for-monocular-depth-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980574.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980574-supp.pdf
+siamdoge-domain-generalizable-semantic-segmentation-using-siamese-network,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980590.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980590-supp.pdf
+context-aware-streaming-perception-in-dynamic-environments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980608.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980608-supp.zip
+spot-spatiotemporal-modeling-for-3d-object-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980624.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980624-supp.pdf
+multimodal-transformer-for-automatic-3d-annotation-and-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980641.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980641-supp.pdf
+dynamic-3d-scene-analysis-by-point-cloud-accumulation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980658.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980658-supp.pdf
+homogeneous-multi-modal-feature-fusion-and-interaction-for-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980675.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980675-supp.pdf
+jperceiver-joint-perception-network-for-depth-pose-and-layout-estimation-in-driving-scenes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980692.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980692-supp.pdf
+semi-supervised-3d-object-detection-with-proficient-teachers,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980710.pdf,
+point-cloud-compression-with-sibling-context-and-surface-priors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980726.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136980726-supp.pdf
+lane-detection-transformer-based-on-multi-frame-horizontal-and-vertical-attention-and-visual-transformer-module,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990001.pdf,
+proposalcontrast-unsupervised-pre-training-for-lidar-based-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990017.pdf,
+pretram-self-supervised-pre-training-via-connecting-trajectory-and-map,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990034.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990034-supp.pdf
+master-of-all-simultaneous-generalization-of-urban-scene-segmentation-to-all-adverse-weather-conditions,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990051.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990051-supp.pdf
+less-label-efficient-semantic-segmentation-for-lidar-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990070.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990070-supp.pdf
+visual-cross-view-metric-localization-with-dense-uncertainty-estimates,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990089.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990089-supp.zip
+v2x-vit-vehicle-to-everything-cooperative-perception-with-vision-transformer,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990106.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990106-supp.pdf
+devnet-self-supervised-monocular-depth-learning-via-density-volume-construction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990123.pdf,
+action-based-contrastive-learning-for-trajectory-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990140.pdf,
+radatron-accurate-detection-using-multi-resolution-cascaded-mimo-radar,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990157.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990157-supp.zip
+lidar-distillation-bridging-the-beam-induced-domain-gap-for-3d-object-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990175.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990175-supp.zip
+efficient-point-cloud-segmentation-with-geometry-aware-sparse-networks,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990193.pdf,
+fh-net-a-fast-hierarchical-network-for-scene-flow-estimation-on-real-world-point-clouds,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990210.pdf,
+spatialdetr-robust-scalable-transformer-based-3d-object-detection-from-multi-view-camera-images-with-global-cross-sensor-attention,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990226.pdf,
+pixel-wise-energy-biased-abstention-learning-for-anomaly-segmentation-on-complex-urban-driving-scenes,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990242.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990242-supp.pdf
+rethinking-closed-loop-training-for-autonomous-driving,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990259.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990259-supp.zip
+slide-self-supervised-lidar-de-snowing-through-reconstruction-difficulty,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990277.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990277-supp.pdf
+generative-meta-adversarial-network-for-unseen-object-navigation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990295.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990295-supp.pdf
+object-manipulation-via-visual-target-localization,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990314.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990314-supp.zip
+moda-map-style-transfer-for-self-supervised-domain-adaptation-of-embodied-agents,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990332.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990332-supp.zip
+housekeep-tidying-virtual-households-using-commonsense-reasoning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990350.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990350-supp.pdf
+domain-randomization-enhanced-depth-simulation-and-restoration-for-perceiving-and-grasping-specular-and-transparent-objects,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990369.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990369-supp.pdf
+resolving-copycat-problems-in-visual-imitation-learning-via-residual-action-prediction,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990386.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990386-supp.pdf
+opd-single-view-3d-openable-part-detection,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990404.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990404-supp.zip
+airdet-few-shot-detection-without-fine-tuning-for-autonomous-exploration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990421.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990421-supp.pdf
+transgrasp-grasp-pose-estimation-of-a-category-of-objects-by-transferring-grasps-from-only-one-labeled-instance,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990438.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990438-supp.pdf
+starformer-transformer-with-state-action-reward-representations-for-visual-reinforcement-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990455.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990455-supp.pdf
+tidee-tidying-up-novel-rooms-using-visuo-semantic-commonsense-priors,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990473.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990473-supp.pdf
+learning-efficient-multi-agent-cooperative-visual-exploration,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990491.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990491-supp.pdf
+zero-shot-category-level-object-pose-estimation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990509.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990509-supp.pdf
+sim-to-real-6d-object-pose-estimation-via-iterative-self-training-for-robotic-bin-picking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990526.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990526-supp.pdf
+active-audio-visual-separation-of-dynamic-sound-sources,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990543.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990543-supp.pdf
+dexmv-imitation-learning-for-dexterous-manipulation-from-human-videos,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990562.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990562-supp.pdf
+sim-2-sim-transfer-for-vision-and-language-navigation-in-continuous-environments,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990580.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990580-supp.zip
+style-agnostic-reinforcement-learning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990596.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990596-supp.zip
+self-supervised-interactive-object-segmentation-through-a-singulation-and-grasping-approach,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990613.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990613-supp.pdf
+learning-from-unlabeled-3d-environments-for-vision-and-language-navigation,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990630.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990630-supp.pdf
+bodyslam-joint-camera-localisation-mapping-and-human-motion-tracking,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990648.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990648-supp.zip
+fusionvae-a-deep-hierarchical-variational-autoencoder-for-rgb-image-fusion,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990666.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990666-supp.pdf
+learning-algebraic-representation-for-systematic-generalization-in-abstract-reasoning,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990683.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990683-supp.pdf
+video-dialog-as-conversation-about-objects-living-in-space-time,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990701.pdf,https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136990701-supp.pdf
diff --git a/te_u/paper_down_load/eccv_download.py b/te_u/paper_down_load/eccv_download.py
new file mode 100644
index 0000000..29412ae
--- /dev/null
+++ b/te_u/paper_down_load/eccv_download.py
@@ -0,0 +1,658 @@
+from bs4 import BeautifulSoup
+import pickle
+import os
+
+os.environ['http_proxy'] = '127.0.0.1:7890'
+os.environ['https_proxy'] = '127.0.0.1:7890'
+
+from tqdm import tqdm
+from slugify import slugify
+import csv
+import sys
+
+import urllib
+import random
+import time
+from urllib.error import URLError, HTTPError
+
+import requests
+
+
+class Downloader:
+    def __init__(self, downloader=None, is_random_step=None):
+        # 'downloader' is kept for interface compatibility only; files are
+        # fetched directly via urlopen_with_retry instead of an external tool
+        self.downloader = downloader
+        self.is_random_step = is_random_step
+
+    def download(self, urls=None, save_path=None, time_sleep_in_seconds=None):
+        print(urls)
+        headers = {
+            'User-Agent':
+            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
+        content = urlopen_with_retry(url=urls, headers=headers)
+        with open(save_path, 'wb') as f:
+            f.write(content)
+        # wait between requests; with is_random_step the pause is sampled from
+        # Uniform(0.5t, 1.5t), as described in download_from_csv_i
+        if time_sleep_in_seconds:
+            t = time_sleep_in_seconds
+            if self.is_random_step:
+                t = random.uniform(0.5 * t, 1.5 * t)
+            time.sleep(t)
+
+
+def download_from_csv_i(
+ postfix=None, save_dir=None, csv_file_path=None, is_download_main_paper=True,
+ is_download_bib=True, is_download_supplement=True,
+ time_step_in_seconds=5, total_paper_number=None,
+ downloader='IDM', is_random_step=True):
+ """
+ download paper, bibtex and supplement files and save them to
+ save_dir/main_paper and save_dir/supplement respectively
+ :param postfix: str, postfix that will be added at the end of papers' title
+ :param save_dir: str, paper and supplement material's save path
+ :param csv_file_path: str, the full path to csv file
+ :param is_download_main_paper: bool, True for downloading main paper
+ :param is_download_supplement: bool, True for downloading supplemental
+ material
+ :param time_step_in_seconds: int, the interval time between two downloading
+ request in seconds
+ :param total_paper_number: int, the total number of papers that is going to
+ download
+ :param downloader: str, the downloader to download, could be 'IDM' or None,
+ default to 'IDM'.
+ :param is_random_step: bool, whether random sample the time step between two
+ adjacent download requests. If True, the time step will be sampled
+ from Uniform(0.5t, 1.5t), where t is the given time_step_in_seconds.
+ Default: True.
+ :return: True
+ """
+ downloader = Downloader(
+ downloader=downloader, is_random_step=is_random_step)
+ if not os.path.exists(csv_file_path):
+ raise ValueError(f'ERROR: file not found in {csv_file_path}!!!')
+
+ main_save_path = os.path.join(save_dir, 'main_paper')
+ if is_download_main_paper:
+ os.makedirs(main_save_path, exist_ok=True)
+ if is_download_supplement:
+ supplement_save_path = os.path.join(save_dir, 'supplement')
+ os.makedirs(supplement_save_path, exist_ok=True)
+
+ error_log = []
+ with open(csv_file_path, newline='') as csvfile:
+ myreader = csv.DictReader(csvfile, delimiter=',')
+ pbar = tqdm(myreader, total=total_paper_number)
+ i = 0
+ for this_paper in pbar:
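+            # bib download is only attempted when the csv actually has a 'bib'
+            # column; rows may also carry an optional 'group' used for sub-folders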
+ is_download_bib &= ('bib' in this_paper)
+ is_grouped = ('group' in this_paper)
+ i += 1
+ # get title
+ if is_grouped:
+ group = slugify(this_paper['group'])
+ title = slugify(this_paper['title'])
+ if total_paper_number is not None:
+ pbar.set_description(
+ f'Downloading {postfix} paper {i} /{total_paper_number}')
+ else:
+ pbar.set_description(f'Downloading {postfix} paper {i}')
+ this_paper_main_path = os.path.join(
+ main_save_path, f'{title}_{postfix}.pdf')
+ if is_grouped:
+ this_paper_main_path = os.path.join(
+ main_save_path, group, f'{title}_{postfix}.pdf')
+ if is_download_supplement:
+ this_paper_supp_path_no_ext = os.path.join(
+ supplement_save_path, f'{title}_{postfix}_supp.')
+ if is_grouped:
+ this_paper_supp_path_no_ext = os.path.join(
+ supplement_save_path, group, f'{title}_{postfix}_supp.')
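+            # resume support: rows whose files already exist on disk are skipped,
+            # so the script can safely be re-run after an interruption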
+            main_paper_exists = os.path.exists(this_paper_main_path)
+            if is_download_supplement and '' != this_paper['supplemental link']:
+                supp_exists = (
+                    os.path.exists(this_paper_supp_path_no_ext + 'zip') or
+                    os.path.exists(this_paper_supp_path_no_ext + 'pdf'))
+                if main_paper_exists and supp_exists:
+                    continue
+            elif main_paper_exists:
+                continue
+ if 'error' == this_paper['main link']:
+ error_log.append((title, 'no MAIN link'))
+ elif '' != this_paper['main link']:
+ if is_grouped:
+ if is_download_main_paper:
+ os.makedirs(os.path.join(main_save_path, group),
+ exist_ok=True)
+ if is_download_supplement:
+ os.makedirs(os.path.join(supplement_save_path, group),
+ exist_ok=True)
+ if is_download_main_paper:
+ try:
+ # download paper with IDM
+ if not os.path.exists(this_paper_main_path):
+ downloader.download(
+ urls=this_paper['main link'].replace(
+ ' ', '%20'),
+ save_path=os.path.join(
+ os.getcwd(), this_paper_main_path),
+ time_sleep_in_seconds=time_step_in_seconds
+ )
+ except Exception as e:
+ # error_flag = True
+ print('Error: ' + title + ' - ' + str(e))
+ error_log.append((title, this_paper['main link'],
+ 'main paper download error', str(e)))
+ # download supp
+ if is_download_supplement:
+ # check whether the supp can be downloaded
+ if not (os.path.exists(
+ this_paper_supp_path_no_ext + 'zip') or
+ os.path.exists(
+ this_paper_supp_path_no_ext + 'pdf')):
+ if 'error' == this_paper['supplemental link']:
+ error_log.append((title, 'no SUPPLEMENTAL link'))
+ elif '' != this_paper['supplemental link']:
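+                        # keep the original file extension (pdf or zip) of the
+                        # supplementary material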
+ supp_type = \
+ this_paper['supplemental link'].split('.')[-1]
+ try:
+ downloader.download(
+ urls=this_paper['supplemental link'],
+ save_path=os.path.join(
+ os.getcwd(),
+ this_paper_supp_path_no_ext + supp_type),
+ time_sleep_in_seconds=time_step_in_seconds
+ )
+ except Exception as e:
+ # error_flag = True
+ print('Error: ' + title + ' - ' + str(e))
+ error_log.append((title, this_paper[
+ 'supplemental link'],
+ 'supplement download error',
+ str(e)))
+ # download bibtex file
+ if is_download_bib:
+ bib_path = this_paper_main_path[:-3] + 'bib'
+ if not os.path.exists(bib_path):
+ if 'error' == this_paper['bib']:
+ error_log.append((title, 'no bibtex link'))
+ elif '' != this_paper['bib']:
+ try:
+ downloader.download(
+ urls=this_paper['bib'],
+ save_path=os.path.join(os.getcwd(),
+ bib_path),
+ time_sleep_in_seconds=time_step_in_seconds
+ )
+ except Exception as e:
+ # error_flag = True
+ print('Error: ' + title + ' - ' + str(e))
+ error_log.append((title, this_paper['bib'],
+ 'bibtex download error',
+ str(e)))
+
+    # 2. report the errors collected above
+    if error_log:
+        print('The following items could not be downloaded:')
+        for error_item in error_log:
+            print(error_item)
+    return True
+
+
+def get_paper_name_link_from_url(url):
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
+ paper_dict = dict()
+ content = urlopen_with_retry(url=url, headers=headers)
+ soup = BeautifulSoup(content, 'html5lib')
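+    # Springer book pages list each chapter as a 'content-type-list__item';
+    # collect a {slugified title: pdf link} mapping from those entries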
+ paper_list_bar = tqdm(soup.find_all(['li'], {'class': 'chapter-item content-type-list__item'}))
+ for paper in paper_list_bar:
+ try:
+ title = slugify(paper.find('div', {'class': 'content-type-list__title'}).text)
+ link = urllib.parse.urljoin(url, paper.find('div', {'class': 'content-type-list__action'}).a.get('href'))
+ paper_dict[title] = link
+ except Exception as e:
+ print(f'ERROR: {str(e)}')
+ return paper_dict
+
+
+def urlopen_with_retry(url, headers=dict(), retry_time=3, time_out=20,
+ raise_error_if_failed=True):
+ """
+ load content from url with given headers. Retry if error occurs.
+ Args:
+ url (str): url.
+ headers (dict): request headers. Default: {}.
+ retry_time (int): max retry time. Default: 3.
+        time_out (int): time out in seconds. Default: 20.
+ raise_error_if_failed (bool): whether to raise error if failed.
+ Default: True.
+
+ Returns:
+ content(str|None): url content. None will be returned if failed.
+
+ """
+    for r in range(retry_time):
+        try:
+            res = requests.get(url=url, headers=headers, timeout=time_out)
+            res.raise_for_status()
+            return res.content
+        except requests.exceptions.HTTPError as e:
+            print('The server couldn\'t fulfill the request.')
+            print('Error code: ', e.response.status_code)
+        except requests.exceptions.RequestException as e:
+            print('We failed to reach a server.')
+            print('Reason: ', e)
+        s = random.randint(3, 7)
+        print(f'random sleeping {s} seconds and doing {r + 1}/{retry_time}'
+              f'-th retrying...')
+        time.sleep(s)
+    if raise_error_if_failed:
+        raise ValueError(f'Failed to open {url} after trying {retry_time} '
+                         f'times!')
+    else:
+        return None
+
+
+def save_csv(year):
+ """
+ write ECCV papers' and supplemental material's urls in one csv file
+ :param year: int
+ :return: True
+ """
+ project_root_folder = r"D:\py\keyan_qingbao\te_u\paper_down_load"
+ csv_file_pathname = os.path.join(
+ project_root_folder, 'csv', f'ECCV_{year}.csv')
+ with open(csv_file_pathname, 'w', newline='') as csvfile:
+ fieldnames = ['title', 'main link', 'supplemental link']
+ writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+ writer.writeheader()
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) '
+ 'Gecko/20100101 Firefox/23.0'}
+ dat_file_pathname = os.path.join(
+ project_root_folder, 'urls', f'init_url_ECCV_{year}.dat')
+ if year >= 2018:
+            init_url = 'https://www.ecva.net/papers.php'
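+            # cache the raw papers.php page as a pickle so repeated runs do
+            # not have to re-download the large index page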
+ if os.path.exists(dat_file_pathname):
+ with open(dat_file_pathname, 'rb') as f:
+ content = pickle.load(f)
+ else:
+ content = urlopen_with_retry(url=init_url, headers=headers)
+ with open(dat_file_pathname, 'wb') as f:
+ pickle.dump(content, f)
+ soup = BeautifulSoup(content, 'html5lib')
+ paper_list_bar = tqdm(soup.find_all(['dt', 'dd']))
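+            # the parser expects each paper to appear as a <dt class="ptitle">
+            # holding the title, followed by a <dd> with the pdf / supp links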
+ paper_index = 0
+ paper_dict = {'title': '',
+ 'main link': '',
+ 'supplemental link': ''}
+ for paper in paper_list_bar:
+ is_new_paper = False
+
+ # get title
+ try:
+ if 'dt' == paper.name and \
+ 'ptitle' == paper.get('class')[0] and \
+ year == int(paper.a.get('href').split('_')[1][:4]): # title:
+ # this_year = int(paper.a.get('href').split('_')[1][:4])
+ title = slugify(paper.text.strip())
+ paper_dict['title'] = title
+ paper_index += 1
+ paper_list_bar.set_description_str(
+ f'Downloading paper {paper_index}: {title}')
+ elif '' != paper_dict['title'] and 'dd' == paper.name:
+ all_as = paper.find_all('a')
+ for a in all_as:
+ if 'pdf' == slugify(a.text.strip()):
+ main_link = urllib.parse.urljoin(init_url,
+ a.get('href'))
+ paper_dict['main link'] = main_link
+ is_new_paper = True
+ elif 'supp' == slugify(a.text.strip())[:4]:
+ supp_link = urllib.parse.urljoin(init_url,
+ a.get('href'))
+ paper_dict['supplemental link'] = supp_link
+ break
+                except Exception:
+                    # rows that do not match the expected dt/dd structure are skipped
+                    pass
+ if is_new_paper:
+ writer.writerow(paper_dict)
+ paper_dict = {'title': '',
+ 'main link': '',
+ 'supplemental link': ''}
+ else:
+ init_url = f'http://www.eccv{year}.org/main-conference/'
+ if os.path.exists(dat_file_pathname):
+ with open(dat_file_pathname, 'rb') as f:
+ content = pickle.load(f)
+ else:
+ content = urlopen_with_retry(url=init_url, headers=headers)
+ with open(dat_file_pathname, 'wb') as f:
+ pickle.dump(content, f)
+ soup = BeautifulSoup(content, 'html5lib')
+ paper_list_bar = tqdm(
+ soup.find('div', {'class': 'entry-content'}).find_all(['p']))
+ paper_index = 0
+ paper_dict = {'title': '',
+ 'main link': '',
+ 'supplemental link': ''}
+ for paper in paper_list_bar:
+ try:
+ if len(paper.find_all(['strong'])) and len(
+ paper.find_all(['a'])) and len(paper.find_all(['img'])):
+ paper_index += 1
+ title = slugify(paper.find('strong').text)
+ paper_dict['title'] = title
+ paper_list_bar.set_description_str(
+ f'Downloading paper {paper_index}: {title}')
+ main_link = paper.find('a').get('href')
+ paper_dict['main link'] = main_link
+ writer.writerow(paper_dict)
+ paper_dict = {'title': '',
+ 'main link': '',
+ 'supplemental link': ''}
+ except Exception as e:
+ print(f'ERROR: {str(e)}')
+ return paper_index
+
+
+def download_from_csv(
+ year, save_dir, is_download_supplement=True, time_step_in_seconds=5,
+ total_paper_number=None,
+ is_workshops=False, downloader='IDM'):
+ """
+ download all ECCV paper and supplement files given year, restore in
+ save_dir/main_paper and save_dir/supplement respectively
+    :param year: int, ECCV year, such as 2019
+    :param save_dir: str, paper and supplement material's save path
+    :param is_download_supplement: bool, True for downloading supplemental
+        material
+    :param time_step_in_seconds: int, the interval time between two download
+        requests in seconds
+    :param total_paper_number: int, the total number of papers to be
+        downloaded
+    :param is_workshops: bool, whether to download workshop papers from the
+        csv file
+    :param downloader: str, the downloader to use, could be 'IDM' or
+        'Thunder', defaults to 'IDM'
+ :return: True
+ """
+ postfix = f'ECCV_{year}'
+ if is_workshops:
+ postfix = f'ECCV_WS_{year}'
+ csv_file_name = f'ECCV_{year}.csv' if not is_workshops else \
+ f'ECCV_WS_{year}.csv'
+ project_root_folder = r"D:\py\keyan_qingbao\te_u\paper_down_load"
+ csv_file_name = os.path.join(project_root_folder, 'csv', csv_file_name)
+ download_from_csv_i(
+ postfix=postfix,
+ save_dir=save_dir,
+ csv_file_path=csv_file_name,
+ is_download_supplement=is_download_supplement,
+ time_step_in_seconds=time_step_in_seconds,
+ total_paper_number=total_paper_number,
+ downloader=downloader
+ )
+
+
+def download_from_springer(
+ year, save_dir, is_workshops=False, time_sleep_in_seconds=5,
+ downloader='IDM'):
+ os.makedirs(save_dir, exist_ok=True)
+ if 2018 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-030-01246-5',
+ 'https://link.springer.com/book/10.1007/978-3-030-01216-8',
+ 'https://link.springer.com/book/10.1007/978-3-030-01219-9',
+ 'https://link.springer.com/book/10.1007/978-3-030-01225-0',
+ 'https://link.springer.com/book/10.1007/978-3-030-01228-1',
+ 'https://link.springer.com/book/10.1007/978-3-030-01231-1',
+ 'https://link.springer.com/book/10.1007/978-3-030-01234-2',
+ 'https://link.springer.com/book/10.1007/978-3-030-01237-3',
+ 'https://link.springer.com/book/10.1007/978-3-030-01240-3',
+ 'https://link.springer.com/book/10.1007/978-3-030-01249-6',
+ 'https://link.springer.com/book/10.1007/978-3-030-01252-6',
+ 'https://link.springer.com/book/10.1007/978-3-030-01258-8',
+ 'https://link.springer.com/book/10.1007/978-3-030-01261-8',
+ 'https://link.springer.com/book/10.1007/978-3-030-01264-9',
+ 'https://link.springer.com/book/10.1007/978-3-030-01267-0',
+ 'https://link.springer.com/book/10.1007/978-3-030-01270-0'
+ ]
+ else:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-030-11009-3',
+ 'https://link.springer.com/book/10.1007/978-3-030-11012-3',
+ 'https://link.springer.com/book/10.1007/978-3-030-11015-4',
+ 'https://link.springer.com/book/10.1007/978-3-030-11018-5',
+ 'https://link.springer.com/book/10.1007/978-3-030-11021-5',
+ 'https://link.springer.com/book/10.1007/978-3-030-11024-6'
+ ]
+ elif 2016 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46448-0',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46475-6',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46487-9',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46493-0',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46454-1',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46466-4',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46478-7',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46484-8'
+ ]
+ else:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007%2F978-3-319-46604-0',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-48881-3',
+ 'https://link.springer.com/book/10.1007%2F978-3-319-49409-8'
+ ]
+ elif 2014 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-319-10590-1',
+ 'https://link.springer.com/book/10.1007/978-3-319-10605-2',
+ 'https://link.springer.com/book/10.1007/978-3-319-10578-9',
+ 'https://link.springer.com/book/10.1007/978-3-319-10593-2',
+ 'https://link.springer.com/book/10.1007/978-3-319-10602-1',
+ 'https://link.springer.com/book/10.1007/978-3-319-10599-4',
+ 'https://link.springer.com/book/10.1007/978-3-319-10584-0'
+ ]
+ else:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-319-16178-5',
+ 'https://link.springer.com/book/10.1007/978-3-319-16181-5',
+ 'https://link.springer.com/book/10.1007/978-3-319-16199-0',
+ 'https://link.springer.com/book/10.1007/978-3-319-16220-1'
+ ]
+ elif 2012 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-642-33718-5',
+ 'https://link.springer.com/book/10.1007/978-3-642-33709-3',
+ 'https://link.springer.com/book/10.1007/978-3-642-33712-3',
+ 'https://link.springer.com/book/10.1007/978-3-642-33765-9',
+ 'https://link.springer.com/book/10.1007/978-3-642-33715-4',
+ 'https://link.springer.com/book/10.1007/978-3-642-33783-3',
+ 'https://link.springer.com/book/10.1007/978-3-642-33786-4'
+ ]
+ else:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-642-33863-2',
+ 'https://link.springer.com/book/10.1007/978-3-642-33868-7',
+ 'https://link.springer.com/book/10.1007/978-3-642-33885-4'
+ ]
+ elif 2010 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-642-15549-9',
+ 'https://link.springer.com/book/10.1007/978-3-642-15552-9',
+ 'https://link.springer.com/book/10.1007/978-3-642-15558-1',
+ 'https://link.springer.com/book/10.1007/978-3-642-15561-1',
+ 'https://link.springer.com/book/10.1007/978-3-642-15555-0',
+ 'https://link.springer.com/book/10.1007/978-3-642-15567-3'
+ ]
+ else:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-642-35749-7',
+ 'https://link.springer.com/book/10.1007/978-3-642-35740-4'
+ ]
+ elif 2008 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/978-3-540-88682-2',
+ 'https://link.springer.com/book/10.1007/978-3-540-88688-4',
+ 'https://link.springer.com/book/10.1007/978-3-540-88690-7',
+ 'https://link.springer.com/book/10.1007/978-3-540-88693-8'
+ ]
+ else:
+ urls_list = []
+ elif 2006 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/11744023',
+ 'https://link.springer.com/book/10.1007/11744047',
+ 'https://link.springer.com/book/10.1007/11744078',
+ 'https://link.springer.com/book/10.1007/11744085'
+ ]
+ else:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/11754336'
+ ]
+ elif 2004 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/b97865',
+ 'https://link.springer.com/book/10.1007/b97866',
+ 'https://link.springer.com/book/10.1007/b97871',
+ 'https://link.springer.com/book/10.1007/b97873'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 2002 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/3-540-47969-4',
+ 'https://link.springer.com/book/10.1007/3-540-47967-8',
+ 'https://link.springer.com/book/10.1007/3-540-47977-5',
+ 'https://link.springer.com/book/10.1007/3-540-47979-1'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 2000 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/3-540-45054-8',
+ 'https://link.springer.com/book/10.1007/3-540-45053-X'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 1998 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/BFb0055655',
+ 'https://link.springer.com/book/10.1007/BFb0054729'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 1996 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/BFb0015518',
+ 'https://link.springer.com/book/10.1007/3-540-61123-1'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 1994 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/3-540-57956-7',
+ 'https://link.springer.com/book/10.1007/BFb0028329'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 1992 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/3-540-55426-2'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ elif 1990 == year:
+ if not is_workshops:
+ urls_list = [
+ 'https://link.springer.com/book/10.1007/BFb0014843'
+ ]
+ else:
+ urls_list = [
+
+ ]
+ else:
+        raise ValueError(f'ECCV {year} is currently not available!')
+ for url in urls_list:
+ __download_from_springer(
+ url, save_dir, year, is_workshops=is_workshops,
+ time_sleep_in_seconds=time_sleep_in_seconds,
+ downloader=downloader)
+
+
+def __download_from_springer(
+ url, save_dir, year, is_workshops=False, time_sleep_in_seconds=5,
+ downloader='IDM'):
+    downloader = Downloader(downloader=downloader)
+    papers_dict = {}  # stays empty if every attempt below fails
+    for i in range(3):
+        try:
+            papers_dict = get_paper_name_link_from_url(url)
+            break
+        except Exception as e:
+            print(str(e))
+ # total_paper_number = len(papers_dict)
+ pbar = tqdm(papers_dict.keys())
+ postfix = f'ECCV_{year}'
+ if is_workshops:
+ postfix = f'ECCV_WS_{year}'
+
+ for name in pbar:
+ pbar.set_description(f'Downloading paper {name}')
+ if not os.path.exists(os.path.join(save_dir, f'{name}_{postfix}.pdf')):
+ downloader.download(
+ papers_dict[name],
+ os.path.join(save_dir, f'{name}_{postfix}.pdf'),
+ time_sleep_in_seconds)
+
+
+if __name__ == '__main__':
+ year = 2022
+ # total_paper_number = 1645
+ total_paper_number = save_csv(year)
+ download_from_csv(year,
+ save_dir=fr'D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_{year}',
+ is_download_supplement=False,
+ time_step_in_seconds=5,
+ total_paper_number=total_paper_number,
+ is_workshops=False)
+ # move_main_and_supplement_2_one_directory(
+ # main_path=fr'D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_{year}\main_paper',
+ # supplement_path=fr'D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_{year}\supplement',
+ # supp_pdf_save_path=fr'D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_{year}\main_paper'
+ # )
+ # for year in range(2018, 2017, -2):
+ # # download_from_springer(
+ # # save_dir=f'F:\\ECCV_{year}',
+ # # year=year,
+ # # is_workshops=False, time_sleep_in_seconds=30)
+ # download_from_springer(
+ # save_dir=f'F:\\ECCV_WS_{year}',
+ # year=year,
+ # is_workshops=True, time_sleep_in_seconds=30)
+ # pass
diff --git a/te_u/paper_down_load/pdf_show.py b/te_u/paper_down_load/pdf_show.py
new file mode 100644
index 0000000..57e74fb
--- /dev/null
+++ b/te_u/paper_down_load/pdf_show.py
@@ -0,0 +1,9 @@
+import gradio as gr
+from gradio_pdf import PDF
+
+with gr.Blocks() as demo:
+ pdf = PDF(label="Upload a PDF", interactive=True, height=800)
+ name = gr.Textbox()
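+    # echo the uploaded file's path into the textbox when a PDF is selected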
+ pdf.upload(lambda f: f, pdf, name)
+
+demo.launch()
diff --git a/te_u/paper_down_load/pdf_show2.py b/te_u/paper_down_load/pdf_show2.py
new file mode 100644
index 0000000..fee2101
--- /dev/null
+++ b/te_u/paper_down_load/pdf_show2.py
@@ -0,0 +1,64 @@
+import os
+
+import gradio as gr
+from gradio_pdf import PDF
+
+current_pdf_file = None
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column(scale=1):
+ with gr.Row():
+ # gr.Label("会议名称")
+ conf_name = gr.Dropdown(choices=["ECCV2022", "ECCV2020", "CVPR2024"], value="ECCV2022", label="会议名称", show_label=True)
+ conf_button = gr.Button("查看会议论文", variant='primary')
+ dataframe = gr.Dataframe(headers=["论文名称"], col_count=(1, "fixed"), type='array', height=800)
+ with gr.Row():
+ look_input = gr.Textbox(placeholder="关键词检索", label="关键词过滤")
+ filter_button = gr.Button("过滤")
+ # up_button = gr.Button("加载")
+
+ with gr.Column(scale=2):
+ pdf = PDF(label="Upload a PDF", interactive=True, height=1000)
+
+
+ # name = gr.Textbox(show_label=False)
+ # pdf.upload(lambda f: f, pdf, name)
+
+ def up_load():
+ global current_pdf_file
+ n = r"D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_2022\main_paper\3d-siamese-transformer-network-for-single-object-tracking-on-point-clouds_ECCV_2022.pdf"
+ current_pdf_file = n
+ return n
+
+
+    def load_conf_list(conf_name):
+        if conf_name == "ECCV2022":
+            root_dir = r"D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_2022\main_paper"
+            return [[i] for i in os.listdir(root_dir)]
+        # only the ECCV 2022 papers have been downloaded locally, so the other
+        # conferences return an empty table for now
+        return []
+
+
+ def look_dataframe(evt: gr.SelectData):
+ global current_pdf_file
+ if evt.value:
+ root_dir = r"D:\py\keyan_qingbao\te_u\paper_down_load\ECCV_2022\main_paper"
+ n = os.path.join(root_dir, evt.value)
+ if os.path.exists(n):
+ current_pdf_file = n
+ return PDF(value=current_pdf_file, label="Upload a PDF", interactive=True, height=1000)
+
+
+ def filter_by_word(words, paper_list):
+ word_list = words.strip().split()
+ paper_list_filter = [p[0] for p in paper_list]
+ for word in word_list:
+ paper_list_filter = [p for p in paper_list_filter if word in p]
+ return [[p] for p in paper_list_filter]
+
+
+ filter_button.click(filter_by_word, inputs=[look_input, dataframe], outputs=[dataframe])
+ dataframe.select(look_dataframe, inputs=None, outputs=[pdf])
+ conf_button.click(load_conf_list, inputs=[conf_name], outputs=[dataframe])
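+    # wiring: the filter button narrows the table by keyword, selecting a row
+    # opens that paper in the viewer, and the conference button loads the list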
+ # up_button.click(up_load, inputs=None, outputs=[pdf])
+
+demo.launch()
diff --git a/te_u/paper_down_load/urls/init_url_ECCV_2022.dat b/te_u/paper_down_load/urls/init_url_ECCV_2022.dat
new file mode 100644
index 0000000000000000000000000000000000000000..a34b1f1e7bf455baeb4f32867b99256677b75e88
GIT binary patch
literal 2299323
zcmd44OLH6PmagZ4Cuh$+$I-EScSjT?;!0Aks$=jjiIi9bOI4Jkqaz8VfD}O_3P6&g
z!h@dq2YBGuY5cj0ZnF3&+oX{8Od!{V)vr
zdcecWa2`~8!(ciK=RZ74<{zsY_L=Ohac~*_@a!rIuP5xas0rg`{ArNjVALb9xq+}
z*I$2q;Ilz8AI8%^{MaBIhtptQ^vQWNAB8{eZoltT{)ftLGL0u;Fs^LJmy={3PAl)D
z8NW-VGn>V|DA4cq{R{Uk?k_gtUsRcfqaU92f^j^K__sc*45#78AD#{8^U3Vxix-1%
zejdm3*?byIp7-L*7o5rLMe}*aTI>8=s(q7JW4Ld?(t>6I*qR@!6+JxtI<5XoK<_=30Tae+NexESNTA*a(S*N
zH`R9Kd{Dg}^11YYxS^JQh;E}G!b}(M-hH2+wDb<|4O`jocX?A?t6Wa?2q*|}&B&z*jdQrIy$BF%2vb!M~_ruT6C&S6J9}nWo@cS3R@=y7i3;l=S
zJWjCqj=p@0>v0Sp!)Z9~g_z0q?UQeD?ch3?_PK1wzJ7}Qup=+yPO=xzwP%5roCZw6}roH+!I}@l6ecW
zN*U&3|D};`%kc~M#a;K6y=1>(b~!Hi0#m;=%J1>Hi>FIZmLFbn*H)%6;q93o+P(4M
zjg1#cB2Md`e_6h@?|$c~BHt8FtI=gJ2%lA!ely+TOFk~Usom$@ZGGzj{+p$Hd~x$G
z|0A-0XZl+9WywF82U~Vh|L*%?otU}(o)#Wv%h;j#x4Bq
zv*$st7f<_yP&}#S4bT4W&Z*k>*j4Lm_4?hn&Xf6^IGp;L#g(2_;&E?8KO@^KnpR`%jvG9w|Y%!gs+muTc11}8oL@%ui>-}aD<
z@FwIB{7XFku5u8B)qS44<`q7~2lT1`KKtGN{JXzg`ev=sZWDjjYR@PAkAMD=|LAu6
zd#-dFTov~fP`>iY)mbvZ@96S+Gztki^I&>YxeU1DNj=k_=nVXUAH4Xvi(E~=(ev4a
zJarIGEmgTH9Q2Z&ug;q4w;@op-P7XJz+J`Zp_k
z#O*&g;_6l~3;UJh7~fXe9>qz&auN>66v=m$wSJ7JmG(~M-T9}mH|Jxsu(CUw<4^3}
z)ErL2V8pRc?-|91VJIuYGQO)EMqh?;Qu)Q6oF$e0B>t{)6mWZoNu?W%zFY?5vZhko
z_+wM~HYbM7c{;ILef{CPvNNvil3P>z@TGD(4aPH0=aPK!DDHGn3=!7ML00*=ED;+HV0{F;1M`BhIVTR~LvW^Jth@dOdkbP
zSyJlYcfvV--XMxQZo|Aq{$-1}L
zzg}Ar18I3Pkmg$JVFT&(Nl4@pw&Ut)7*H*7Sc>#NkI90<=~bkh;MP7|8h9J`b1sA!
zGLvgKk=r^LabPEB(HM_aIlwf&ZFX2&YkM*guDl{9vhK}9+N}a-au|Wu-6SLDzl<*83D`jy6r$E#^JNRowH2|2O>eePTWc2J5FGtMLgjxCTKL%l
z&(UB0c}4F3nm-t_MsN4wXmCu=d(U93m-zB=JPj^`
zJK&b;l2_w?%$G)c?f?D1(hBxAxy;!TxW%U^taJ^;*d8Y1n}p*hZ+PQEh;qzBJ$xI5
zq!7_;n5ZtXgyPJVC~?VHvFR;u*j$yoq2>_)Z8Zw2k#0EpsCy-=P+jCWg12Bu*cJVG
z^E4G;(Alhd5>BS!Y*CJM)MyvJpEH!h}@6^=haQy?P40&5+*`W<$nAu(wj{2_TJrKPm1>%y1M1N=+}L{&$sESD475T0~bR%>>qsCo`$17|54s^2D7oe|Tzby;0P8
z!M$)|=j4p<1WtrF3i6r0fgrD~b#@z$gmus?B|$yJn*hjTF%&xVX#z4%2aV)x~(%;txQrZ8QoRGRNKO+wcUx@iw$V_SKl+
zLvIualtatl)T3lHk6g3nnC=anCGfIdrAsq7B*(aeu>TT{e~Z$Lcb|xKql-BAL2ErW
zWZgYgeEO&a#yx#=9gM4IVOV+dT&?#(NsDRw%3bR1mC0TF0jaf(R_)B~Dky}Kc$a(ZlTpqaz5osWuTGzM%`D~*{H8dcH$3Mt!>r|YVFh5n0{momn5>>
z^)vNsa#`&zwJ?1><+WnxbVCwP=&G}7io#mX+`Y}2AKlfV${yS(H9iVbdVmV4ked6g
z@)b4tRB5MGc6S&NA#qB_Wpmw4Z%NGN$|N!VP}llet0;-N3Sc&E2a}hT&R7H<%V#?F
zs*trleVtfo@2!Qn`s%$n%Pw;9-ScGXn725Ml=?+L!X7Ag!l|&9w0=6^@wMyfsI*cL
z8)Gi7`QTZ2j;WT$WEwSJ*$r1-702Tblll1qi(>pF496W{3nvMoTTELR#z-Sx7$^
z-L?T2814g=_!3Pi57dI#4MiAKA_FG#Cc3Pg#R=IA)^ZAc@ZbhW@7=ImaB4JsHT}5q
zir7fq9}rxx)eEqg{V+ZWKT->kzzCLE3NdMo91Dc=siMM!2`7Fkf0Aj_WGcndmT7@^_QDbF@M|FQ_ZoOVaD|HpmrThbDHM_uB
z98be495hC`xYK0pGLg8X@+66#gtC6h#ihjBHP)D
z7=fwo-lP=BZa9!$mrv^y1ECwi=W4Thwq
zlC!kZtk}k;FZ`A(uSyW|hu_v)?Tv?v>cyafW-QOCK;}1&m=vEUG<{u+aarsZ!+A^8
z^_+uIpQsA9i?bjVc^zF+EaPjig`MPPkW{wA8@d1%dr|_dY906R2%D)M%v37rTgthb
zrZl!NiQ!(@PwknXB+El1>@Vs<5M#v4zmxiUPZx<8b+zP?dZ2a%7w
zecuJ;jjzHHgWMo$-J3DU8W^0z^2{U*p?NrQN#dKtRmUmEVNltVBT>HMQW~qADUK)V
zJ(_8p#StV4W@6^}q9djm=}tjw`WW6Aa5tS}ZYR$x@0f|!#7!COuG#cOu;$9E;%@xG
zy+EG@4TA1P2-Br&!}(K6Bzt;Glg+u9E82mwt}1!3ItXBSE+e)J5MSKhqJ@{mC)>`EC-R23@IrMf1Bsm3l_GBsPiz!t8&Dwg68<*je57c>TJSZga9(+PM5
z&KqBG-0A5rH5KUuERr4j4(m=&BZf*Tm+~=5&4%bVJk2zYf2(W_gE?*H^J##-!zI0d
zVZgXGCDnZuHpbK~7fD)mZ^lty8RPJW
z%r+2uco-_#VswjWk9T+r+m6eiC3uCg4E2lH@t%npNzX8vuNWIug
znod^x&uA+D`p*PAI>o5GQ0-dgVqpq%q#r0BBc4CsRMOA1eHFf~_No-V4S$eqqq$j-
zv39qs*M~qR8Qx|(Nzhju-C!>a`_;X8Dq_om6d6`W#ex+C9*Z%+Y_b{r5I7nIi_2OV
z79I4JwK#``ZHZaH=So_~y0<8V_6rR570{e%_(NeEt+fJag1u2pvt~TdTyZ>>Lcu~{
z=rrCls;p(I)h_k|p8tggqN7Wu){;+7I^+H{3{)T`i1!FtI|wcutH3;pW7EQon_OrH
z1gu`vTHY#YAe-Jo)aJ^BD1VSE5vqVzu(Nx3+BwM#V+jB;5vqX96Q#&11EGAu$Lwh^
z82LD}oRj@6SLfuSkaSI4KNx3nqj+Pruw&c`9Fx(=YpCUSkVwB^OO6JGBBR}=p$cEq
z53;x=SCk>Pzi0Z~9vZ4GaUNOz65-Ai|Thde-
zzWT`R#;Ra04SyJHV`HtLaz5EQu4)44CB45{)EB(66(>+4@fDOks*|OECYVJ@@-~&!
z0FkBYVW_@PP8BU@8YieBEZhbSVt`wv!6~8TP(EjmSjQV?Y|}ni?Ov!Uf*wqT`0^Ym
zH)SH2?|NNQqO0v~CWI@mN^av1t8Hv<78ubjuL;ld;E0&3od9$xcr-79J>`rAPUIK*
zHfhA!2@GN}T^%c&6uS^rG8Yq(#RPLmEmoSNieWE9xV7qto?~2+*WYgY%5k{zsu+qt
zgtxg?FEW&1Qr(*}u>)&Gu7=^VrTXV3xvY%|nOosK5@u-s%-b_Eh(lrs&4N_Ua(3}r
z(|%m(OvmwvsX`K8rl5bh(G+TzX;prhsY@%Zi6oM7855AED3VtX5X+$E$BsG>xW&(#
zeVGJP{1D%R>bYRSS+h$yxLm_8@`|{GrayERI!!^-M>FP3eO}F;%Y8>03tJ=^=^zh5
z#ewu0)@IQ}nQdo$BiwE~x!>T4EP$l48WhsZ4N_0XpAcH$p6!guX!&v6=)sh#2cI$F
z_BQ9iZ*%k<*EB3^GTw$X)>mdoqv;RKZPuFwCUbOf!VD@x97$!U-%_)t`U|81%>i0x=X2@t
zbk4HjrzWv0uZWfS>)JFM^#ZI6*drBIbCt=m9(=|@%mmk56}!U#=^c~G<)Ar9FQW!z
z7?}(LuacU|nUSI$e;Qknah1tZIm*@}*l6X-2Y!focYi&U=6bzAG3(uwp*V^yN7m5L
z_fl+6wG~4b-jCu3hN-&A6d(!Bq&Eofp$%}D^vM4tffTdu;Op7M{7u?^X1DFiD{^=G
zYl<{C)(YY|q_5dT_#?iW4%+p-N>Mjs^$-=AIexxyT|@2;T@uXhGOqhcU2JGqrpH>-n5RL}uLLf*Km6{}}uc6cFrM|Z-
zugDGTufNdT*evL3KHENccd|=O@8~z`GJ=v>(=r2!cYDj59MW!QiuWxVo6mB$u+1k9
znOpo-T88kv;d9aNqI*HtYK)EG-uEGWO|{`=JgXitM^etk*Xn+FGgwIdsd+P5Q1{n7
zXl~XDXfp=t;4t2YQ#RmHx6OpsBGrKlxyHXtm)cKYJ*o1c?S0vc=Au;OOk;N}Ko+q%
zxK#IzAVYyQ4Zz^EL~!>|G-PdE5BwKJ!?^mE@w9UKW1Oj{@glqOiue44!YQYHJk)DR**Z%(qwFW1*cky&A36;
zyS9?^11P6UO7~4Ny3qbV)#$XSXbZ$cI97mwPTh9zIr^5#Ff7$x=GZ)`B-@o9{ElxSK~GSy*n&C=Ng0Qm`7AqDSi_k&5cG9*7#%C=ax{XAcly)T6eHM*Dw20$CH4
zt{poi&rz(R`QX$Our^6>P#CKnHI^+M+1QH7Cc@idkXoxjq|af^aA|y(3u1Lo3d>s0
z`qW#d7D3D(riqSALHV;C&*nSZ*3Q(!iqjkf=FlQCXH~aN3G-hIG7Ju>y7M3IP!}+^_0y@2bDiiX-8{RIT@zk4sh6pg0=2H(5ER4k;
z9NJo6FUWYFb%&i}P&dZq+1nh=VJR#N{kxRMDUqeI&|2O!p(Yd7mnc8_&af9_SiNHv
z-etNhHIJ_xB@mo0gMQ^hat0pzM~I30vHl@9>~C?@v5Blnty|LLD9oy+rS
z@}K?*lx`SbGL`TKyhRwy2f8bBY)|8I+EfHfZ$F5rvr4+7wjXgrEUcZ11w9#cd|_e!
zVBOY65kpV;GRvh{9&yX~Mk!%=P%ee$QUgP(tjB=KfL-DsZHryXc_{agaX5+pu#aS5
z(*sU>OivZ)&?O_5y$K=_$PYElLKEI$dzM5A1TJYY8@>SE`o>dlTwPdwwAn%V#tIytqV-3}cwPpFWSmHu~V+7)r
z?Pxj<+LzPN#O}1zRWp_)LsX;Yt0l~pSH*h#LBQ>`dO_#$Bn(oyx_9HyEeDtUayrs|
z5R$YwZSQ;xIwb=lGZ~V*BqNL!kKzWS`ecV#V
z7d%k%jXr#ki@d!cKsO&!grnvwYZ3n+-?SAZR8>g)lJLBJPKhROvOQ!_U>>un@rsvE
zZ+h`{)caD>S{@_S{A~yX9u@ts<@Uv`1ret*q-{(bSzuMeaKGrAqxN74^|3MPS~K
zAK4*;CpQuQ_tDfOfzphFWg%cR_lJu<7~Ez#Cew+`8R7RGTh9TzonE3|;cI}DH?Rx7
zkbDH}K;yUSGcQu|7}tHZgBt5kJvp$r>HVR-?RL8W?Oo;u9V6_<4R13-X!LWNYFZKT
zNJe*3ePukr@1_aTEFCzM)|m!LYu|BiPi_vXkHdeYnU4a7`)D?B-=W9GigIOPw)o;R1kdS5a`sePI{Q{i
z_337lBr;{q}bBWI?%_-GzMq~+++MLs|5%yrss*z#x4XS2jh!
z(sD4>5zwwCeKHr}!I~YE46MALq);={ow5$27sJbt>R!1-zq#RSZi_3giXZU@12)=4
zZHJ}iWrZaL)4QVxyW^gQ0tPSdYDc^MZacInaS==Ql*S=rb7(r@U9UGXp*C1OCIo>(
zO*GMH!+mgpPL=G)bsi+!Xu84}74Am$q%nYqXsLx^c5V$1N;qGn3F&;0I;l(;VtZiG
zJ$hn{tMeu$h#nft{gBVANF`w{XOk^&!wy{8+2oTKpB{CHo9zN*`c6Dsh!s$A2z^Y~
zlY&$-tWKXaOLYldekJVZq4?d&B23-uRNzR9mL1suA1fTFYyH~u66c5djU=$CC
zw-=J2eK#c*q5ww|oA>26xS(xp~wRnjGo>cp*$f;qgC@QSXs^}m`){g}J4-byCU
z#2QPwDz=;@>!{_-<^-G_6|I>@sy_3GJ?kV_b9A_5Ap21~9qU~`ddcbW@Y;uf
z;tYE#IrrZ3w1q`GrB?-9^{86Bu~yJmO&6I8TO}6JSD8AB;T;po?ZD0ARnA7vNS#S!
zksBpB0kiq6b(D?-X7hKf72AD|K=ElnxPgK}=pltZBL%h`S)vMJk6Aig(#3508svoW
zvMLeC9|pWxFX+d?;+e9{35-2O{H&)7fLzk}DLLDx9;;`4nmV-H;l);^@%ixt2GW
zYM!QxpcjQweimG0Q6RWhFA@k&Ljdr5k5)P(EQXQT5fwQ8KVSk=
z%vvNc|IU1+A+z%HQrf~-l$?KRds?AVU*pQF;@$m`Hf#0uf;BpOTZY+dkHtA@{vAsA
zaz1c7t&+Q2Q3LpF#32d#pU
z5b4D%w6+{^2Y^h&-pRf6;54X$VL190ndDDAwxK0W&DlCogG%2IUR+PJXAhGcr{}u7pl_3IS?2L=Vc7{TloYA!Mp;=7
z7NAPwo{p4g!6x#4$i|(SwwvUui83Lj;Y|$KuKe^xX1iW(c@zV#*O~>Kw8P*uJlO+(
z2+oNsvX9g7GyBD-WFh0lkXoQEYV=tgT9I=bo!Rh$(?5!)UxJWKf{l;JgfK`%cNh4Z
z@cK{Lbf#~iOWi*k%R-FvqKD;dq%D{Ra&NW#4#i9^m1q}t&sQrp8w5<0~uNF`V-
zGVPZ<=$4-$cMiIh4!KYIAOHNLwlcCpknAcT>Auvm+$caTG5GQCl|e;UBvSmh(u2S`
z`{5Y`+zW2p(qv^jXn&0HI<1?B7Xm8^(2Dwa!!p=(i#YeFdld#M*t9z9cIp`|HFxS~
z0c$)Nl<)VW-!#=Oxu}GmDDtu3&yX>IB$ZVhQFiZF~4SyYsdb^0gW0>bD1Bz_?6`6)&1J?;DqIwda;}7msszyfyt*{wJZMOOh
zaSeY!K38@3RHbB#c2ciH1Pn-w2Lt)#T3SA)Ka#gnY3JVo%xSL+pK6o
zN1rxfm6Dq`IoZ0Oxi!1;intGd?Uedvy#UWRK9S9^$>35+7YddsOY5H9FJzkjW65J0J=Kxwu1Aa)?LkLi
zx+V>FxjdYiF89$=l6t2fOy;iXs#GH_S`W20(ejbS+6!n}eU=v9=cgY8h#b>db+zh2
zsOd-f5c+G^;2`#xu?NS+5zl6%Xp;q>EN8J2)}S(duw)!+`GLtXpVkV%Ol-I;ygU6=OQpmMk_OF+E+g3bRgtA#x2CmD2R&ZvJXg8~5wo
zZiITe?7|T}6lGEb^h3y22H~NWy09QzfGF??46U;eF*g;$<@`sz=7-&hWLqs|cRo23
zQu7CoHXG{&LbYnN18#D9(ofidPd?HHF1N?0t0hfp$fa3}%m%b%IiKmw=g@}owXVvU
zPs&UPJMtIN7ukowAtC`El(mM&BNEkbgJjnHL7B~F(e^qNl9C=^W?1cjNQRZ&zS4Y3
zLspqghUueB4u=C_nh|K1(?iG_nZ2ymwC30!j5sRj>z*d^lQ|;C=n_U@RnV}&IR#g_
z*OWV^g{o>4>eQ0=v?o1b?&(ZV`04LyfB0szRivGA6z>IZtw(xmFR6*7yql9d?a^E<
zdgeqEHIu5b)fmIT5PnVQkvkysSD{!zT=MkWzEG=rdj+JDT(#g2!ECk~1=AhoosmqN
zH+EoFgf>a2yBR;BS8XYhmg(5%@{apdep>oia?>_dh&=sRTE4=b#JPjrU16}W!WbhT
zRHvh)9Xv+C_Z#_ge<&jD$AV^qn@uOBN-KM7KrM9|RJHw%kNyxcO%9#frB+A$!L|7w
zuABN(L|7;=%Ew?zIRd3^F@AGHtavw?ePI*!1s~MfYl5!b(D^CRRQmfgK`iqkzAAT^
zbPqg1(Oh{|l6il)VzXVOx^qa{E}9(Unr`*=BB{6DL)y@UDb-K2d+zq%oe9N)VzVl1
zMd;>s=^06YlP>^YDrpOz7GQ(3)mC66*g1HAa3ZVq3LO06=$*QzX>@4}6vwOtJ?&F1
z?a=)_rh`bbW4ybB)7Y!1XAt2~%M32DHhjlXa51aAyPACp#!-3y;Em+cSQ(OwN2%QA
zW}{#bcv0-zCOzAvY{uXyPpm)0K8OL1QM;26i(G|=dS1r7=CyPmKt3tx4w?ET6P&vx
zYNx?`mfQre=H_hVyap>e!(1+qSAmdyLGEx60Ye2+U{1ayWeIbQmLFvS!S$=+Mm!4j
zHaCkVfBIi8KC7vb7;+g?Gm9iWGkqC@GdhU;&WYuvsqa4}alki?)DYDT?OOYm#iC_UbxeL
zD;@;1oAM$L&17^U6$!Sl>H`m5dlHg^gf^KA7nw?uNs2C-3akPwE8!@aVr=?R^TRqe
zS72(#QVfrByDi27AI6Tmj03u+f}|axNK#0KeZ(FD?XOxknA^EPXD2^TW&tSkojo%-
zM**Zjw;NO;239z8Sb~WM{3heFb!Zri_zwm=SV<}<$_B(vm6KRC^eJK~C?!of6KZ-I
z1#hfz>YD|0Oi^tqSNlss0m!c<6Yz(dCCLCfQ;Rw!WoKFv8&23yl`|a1
z|Bxh^!Ti5Ghhm3BKL!co@>59C@s@04hOYmcVLTcH{c!PCN&Be#YFvW;u8Mv511VdL
zqV%Jqb5BMyrZ!l=I%QNtOe;%T_Yt>JwY$x>F;~}6jP^(J4n&ODu4!b*ZUgaHo
zCm8;je~D6A67mveeb77+be66M*^WU-N~*hAawkvjF>=rXzu|gUT@c_wx_oJ4;Ld=>0mt0+m
zRP*)g#4(L^U+oaCyejU(A9C7eS<=I)tfj9fv_f2Ww7R5wW}akOBbKhn0!VRb?@5UI
zQFsM5BAk9FBsY1G$Ga7~2=abJXH7(!{pW5q>)RM0trMZXXvo@65sUC{v3N#Jq=in}
z0v?<6hs>{(G$U_pn%pL>+&iYkRXu1QrkD%%vOtmY9Mk0t=l5NdX
zEyVXy$c^E3tv9j{K-PUwB5M~87j3x+CYQK(N2nS|w{a&xFM~+by%}9e-?%d!B#|Vt
zN)8;hYQ7Q_uDl|_$fI2F`dYn!QUj<4UjsNk7#YSw#;SSc4%H6e0$y*`3PwxR
z%K3Q*N3s_%hGyx=-K}~0fRYOW6K3RAt!lr@tqwv_gJ%mL@t6N|j&2Hpg#l+ANQeqg
z#Ht=NU3uv{P{5gH6ZVp_=HY3@cZ1>QRl!p{Dgm>pslYtC$GcUNa@&d#<&qsN!?f1o
zk*1EOO-^X%nG12PVr@&U>}tZOXJvFQ)JXl{Bc;$!rra8eH=qeuHfcF5MX;>ES~Ots
zB>KT;BL1H8xudXTI?G1$KjxEs8*-3GA>Q@%wE}mKB37DKnuG6-Vsxznfe*}|l1IR;
z&2+xmk4svpKdzoKQ-UaU
z$O!(fWwVf7N;^Hn=%d4789CCYH8wH;o8@uNHOK(~yg>yP*IRv=)H!V3%0T}b;aEf`
z6wXSzqI}f~U`9-=#lv@&OGeu@Up->3yeb)oKcsbYy`TwrJdH^L6h+Lq
zN}5RH5pm=zI
zr(W>~yw(tjc-S;F6o$7aqbW8DxsLbN;;SAK2NDQF49`>KDuI1syYM!j!0PK&i7Nh(
z)LN}p1drYHb~^b5ZZ|zBBll?^0V^C|lvR_fbD&{N%0_D9MB)pN-+OzDX`d
z!1jzYCu19LHwB)9cxnnR$UP1~dT9Ghl9nH5Ev^+Ao|=w=N~Hsl2ctcr}J!
zjYdhPexEOzG&I2iKyqA)U7S-|LG4o}E6sKM0=;e$aW?x}70Hcyxt#5+ulWL3x$=s5
z9-nf<^;&@z_4biXM2$W!XDpP@i#kU}h1t0Db6u=g2Mh6SgM2?193F&hm}$
z4*?;t9|tlM$9I#RFl8w|8yNm4>;=lD=Ir7`+LM~aENMNDSxNp?o@c$bD)_5Usp7_F
zLEK_e8qie$OqQxtT|lCv3=_b1_gP`>#_JtIGjg%&cWP*su!+<}d5j5YrvBLMK?AsQ
zGfuEYJ3~`JV{AkN8b|=~n?SJRnVLbS?y`qfu}cAuwkXM2JL(4T%T?J;Z*s*zd;>#Lxr-diVm})$$pV-y
za!KDLAu^(Nxj@BnHXZ@nmBaS4``VwdM!i-D?FZ9AIInijl~JTjkp%*q`|~^H1tppI
z?>HF;7eKTO_8Qt}mdc%SFeU+ekx(f_oEEcE6r4P}B$^dLzKbfEjZG?Hr=70S)FY5J10WuA9lQydGWxQpE
zL@p}5MMrCCJjL3l>7adQ9$&KZ(tSRU44g%(+ni+Up*_}00O~y27N&93aSL-%28ITD
z5G0d1&NH+eYVg_4r|E6jk^2L1!yd&De
zqDZHPI(YYrVmj?g<=i7Zq*(Q}xkDR3Jd~7~&3;T&N!hsp-=KMyB|*s-&QtDlJG|z@
zrDheIYc)@Gxm|fh;;FwjL%ms)XK6?%IA@~Jswf8`Yqv9`Q+MTa^kN5Z!nU^Q@66S
zWZYlRq24Om8T5)J=95WCR%|gNb3c*;QR^U5#kB$Q0KYpq8xvcHXs_o-vsC_06Ov(7T!bQXgaWuw6QBck}%-QXZtTKVGma(eXFUjCBZ;68P5TSZ|&nkLm#XKLz?oGXn>NOwO9>ygL;<~359L+VZyehY~KNPLWqVk7HB%OY+7x$9c
z`^HOv5RHg`j1(>=+^Nfu$=JumFMIIJ#+e&
zU0@2Ax71hRG*ZeHsEet|>Pp%}(^vLTZ>~)C;1BUbGqqsD!cI84QtHTl@>m!GGRmmw
zcW=h=grNy7qIEB36cd-18V%`p8gSiV@>!Tex})RQ#85leuXWA-RFhK-*IQzANgMIB
zg@aJ^#6%P~DZB`Xq1A*_x#^Vp(Y;F{lC%;p|x~{yATOOkY8}U)7~D{En6d8>fXEchf-fX4;h5&J1OyoBq=4NWlIAiL
zPB)op93?AJmH(?w;`Q`$>@@`wD+z5iYL6&szbcdE%FcqGO!;QvSo|TI3?F7x_H`-Hl>3gfGsuWvN{2;Mc%m^dDdCB000Ny5AFDAC=?
z3pq(UvkB&Xp@fi5%4mA@I_2~k?YG-E@A(gt3z`xK4DAmBo8Qk;M8d%<$YLL(epJ?j
zw|!v|jrOWw5gwIzTkRq&B7?5--rG8s0oBF>K70>GP!*LkBOU)?S47(%KM9r)MsbMC
zfSZhpZ%hWjP`@ExZ*2U9xftH1@}ThGqn)vIS{+zM4r!Zb(`O*8CDnWJ2OGW^W?Xqy
zJdHollwA=8;U=>v2e5YrY&st}Bg!UZY@`8fl{q_Dq-iT0x{6BJKgtXsD@yLcbv;B@
zH;#ntLnfj8AV6P2##XXd1{42}vx2Y788@F=`F)X&_(MP2Yemw$sl#Bk*GLTV4!8Ym
zW&)aK7YnEY#*IatrQS?F9T9}SW#m&$&X5XF_D)qOCY;WrSkGxzZd
zon0pK(DRi<5d~|yzF4`7C|pr5Vvc5B*+NO?DvI+O&j=}MWgv=F)z=sleY?wY`E0Y{
z3%lUTD^hvZ{o$eQ^+rME$#$6OH3Gb*QM1fu!I+7&!gEa{QvO*A`$y%Pt3VS?1WD2Ph
zI(4y&$}i6=ThHyh&tk;Oq)?ZflRYT=q>(xZVMeo^l(#glbda2f$l$@YI(W|8Nm;*(GKFY*h2sOEaTDF1Z%#h%3k_|;l!?-qv~VRz&COrgMiW|k@t-Nul7Wh)fB
zs+uKr1j;3l&~-dszLuRrYCeVq|5ZsQS6-DY!5=WWzF99C)VFEH_nqBzl0u2XC(~g^
z(=JsH>&(lx#HR7zV*jh*ZmvvDkv&SeCGmcg1`D~oKv
zNER#_BFKxFFJ~|yVUTU0G~=KQS6o2mbS-xC8|CtWR^1ovf-A3zN%&K{-CQqFKO_$@
zEp56GG$U~4Ha=hJB(}5GAr_WD_OGxRNp_4X51Ppt>*y9G)_72I_awWbuZbc0hnT?~3s^vw}fzwe1vXu@?%LBbp
zCrABKw~NZ5Up-bPmtBzEM9=mSRmfLmCR}+{ypBJ3wB9b7z)huYF&k54XC0N0iX|;v
zsgk)m=rk0&4@`8)z^AkAlx?xoyYOz75wSl71Xa5JkU;X7~iwynsR|mDPrZH*un(l^Ok#{F;`h
ztsOX#C*RtURiZvsaBIy1o^J=O9;~3+6-&b1(~QNWh=vJhFeM!K4Y=GIk$(a?eNJ-CxCXrd2&o8Zx|
z`Rc84fU(3oec9!vEfX!6QAxnF{nz8)cvPuv)Qhl)4g(`V
z4^!#~EiM-RbVAIB#V;yv_cD&96);G}0b%Q0a*D)&DUcukkF$mJ~cf7r3)+<
zo5n`O+Xj<#Qz^G8#QmeT)3p?!b1@qRHz1)ysWNTR1!0#q3hko{)<5yeaWt;d8gn%-
zVi98P^`|$z3j)wCRbh+Z{ZIKZD4s1
z&zam>$#U9qYbUZq*g(sh8glESoT6OLp42wg9FyV)L_M`b1c5M|;3D6e
zdVNeB5Vh^CqA6)DZC^EQyS*|stv|?=@#q5H#-e$)`x#Nw%Mg0zVKf-dIXC{hRHdXq
ziFNCD?k<6WSf;&%46*>TgJ4Y5!zU${&*D9|n5yzNxulhnvnnAXuEJGA9@tGb#Y6RN
zQpO?E(QMEpxncT^$5KKR%b3Aez3N<+mV8ScwIp)VUiY&{sL2w^r`EB|a$=7Px{ZyZ
zU707ZwmRD{E2r@_DEcfLV@YFL$Rb{1o?zK6(&2tALXb;rvUJ$i-SyZ;_4aMffDRI%
zeGn5AQ)CWQ!`KhVeS{~H|6z;F0#ZThpZ#_+rPMhwQD<*j>5KDu?xD!{7{wxh
zrTlrP;MVO8Q3S+^=v89d=scO03OyXGCp9p;@`{*@M`@9bMnSg*`^
z-E$8MFLIxakTV^`A}p;HoDUI^I!nono^{3$lK3xK(?tR-U`x?HrYr>zWfmtM2Uqd9
z)OIw^vf+oqYFAznEAiI>YHn^85a5=Lf>f1y$>}OD3NUqmIy%OcSgs~HLuiLPWlRc5(7SAg&*d9#vG9tYdq4v$YD)R&QCssBJ-G6UI1+!1`&J>-%_067f`I}9v)U3WmJn?hS<>Fn+(xEN
zD~d@>R|x?31UZ?9qsUpH`c?dT>J{6nx&+kXP*P{2Y(#m5+>DY}eaMYniKp_LE+KtO
zs$jQW*fHgPNj||}apPP7y`StNWoNQlYv{cFF}b!JRVDKb{@Ok5wW7g_ZrEcN5RlbN
zD9)zX!TI%Kg2#=(cxca6J5128bW@NLO`Y~MI=5kPJ?Txg!N=%pxtJl)RLvd*2*xNl
zI&sigHl;%q^B`ZR7-hCx6sa{H0g`^3BlFjDX(K=MaEII@57d&CHWHkRG
z;riT-eUgkXXr~!eiKq&uAs(ulXsRuPF?EOfBuMm>E)Z+$k6;FUoBP?L5OBNMEGX1&
zXWj$@v8=faJ+Ss%Qw&!W$^4${u#c1lr4Vzqph
zz3cQiGxN=;Dc!&gPTQwCSnHpbTfjmb-S9ISZ&zNCJgw#rKWw*(@WTZHl(M_rU7QB%
zR5jst7@?rP|245po^F&)F&awbs2WHEULimHoLhX@F9{CTJq;?g&>Vj1L8UB-tNB9*
z+pVG%vsM^A7NHAUNDQ1|@S40Vl(I%00^G((jqFSkK9BhY>VTA@Orw#&A
z_(VVEtdt%#Cs=<(9rul#ygxK_7NGui9(JnP)%fUoEAt7T-yWqoj7ADS!Q~B`
zKwr~C(nbLc7HjEFBj(((9$q_nerq4po989FNcBxWEO30>=2Nr47oNx;EV#a2B>&0g
z@KNltmAT|?B!~e&fc$~o-#w9N`?DSa!wF+7S<;~v=;wEM%i8NQRJ5nMnO$-#M+4cAet|26jcDAyE7*9sJy!zOP=?V
z`9qw1s=Ro2+nJ3a>Jrv97-*0^hG%w%&Za`q@kw^XHIe?}?VdCB0L5{oBVf!WQB!^u
zW?1s%9;1i#RoO$MwJJr5M@is%vt5uq=&X19gHZEjHwph{#|Qu
zld7RaRA?s!FEAZZ0N*0@!)f3INqntHR%dlwTn8uRb^X{a6igc7tXh6EFbPYs03&t0;
z5Dc3>K-R+42=tCIj`X=BV={h-KFdf*4TuMC@g_rUv@3h;8-6sDNDMYs1)8k;gC^J8
z8wJ#p+gjE(L5$}vDR9Z(t-Vxs=st5VpWnD#!4?5vn5V%kudswp5^^aE=^IQ1FW}5Z
ztE=3jlK`{eg8gYg&Ttv@$R&6zV?vy79s`7KB{a9ng&9_o|1mk>tHKOdUXgUN?hh?(
z7NnEM$M~ny_yQfh<7qO+4A^sP8WwkSTaV%~i}psr)L;@Pe`3SZZXJnz&n`4sm~Rhc
zB_`7)Mm;UetPf_Z3eqv=(3&7`2r^EE$#uxmfkE<7QLD{Y8C?bM1~*n1i@7ttCu6QK
zOIFgz9-|7p!1109=kv+z<%<^-`4`V;;5{r_nm+HvmoK;nCPd*EEHa3vLiAtPcFBy%{w&+7!OAo(i(-WqYVnd>YsMr`s
zsF|oon#()sLVu2|7tLuL2g#_?Wkyvn1qp+-J5ePNF}^F=!eka!9VE2swaZJ
zfxc0)qbR0~DR(pXA@_3-kG_;V*v2Db&Nn((e{g24h_ZfM?VcVn*|5wZvW)>kF+PY
zxdI7mDJ=pt61{O%xbmt*0e?_syfJh*YQ~T{4x)mTU)Tc~k7)sXLi)
zFcVN8J5{{d;>NXnGPX%^+L~#)V2@OLZim;b5Gt8WHocMCx$>$Qgg;oaT`S-&36+H5
zhQ3j+H=^r6Pn)r+w9hTSaYqXJkxt~NNVTS8#H3;q*b8Cb@tQkK@KIgR=_nhRT(^<^HO#0&{bGDr+847BA$(jE=g^1QDYd-R$)(YTunNIcX&K}wUv38zvm`jBb#Z&Y#9wCWi
zJkNA^OT!s~w%P1OA@Ak<)PBG+)_K6$1M-p??-0SLa6h^B-2eAKVj6_FU9H;CzE-9J
z09m7pc*KMNVEhz95u4}{c4Ha<3xYKA%>!TCk9K(red4FqE?;;ie^6|#T`v%)*gkx<
zhb6Hjcj^efboC8CajR6^W1NbR7dDEKY{+mh7bK4W;s98sH$c-r
zae}p}3jP<`{lqjr!m!wmR=KHnL<}qN~?;dq{0Tx;QY{2{#9O@T_FlPg6GOLlWS
zN(NQTqDtrZW-(u?>ir~y7;-nn$pJmQtLcu-+?vY5dX~{2kYMFGZo2K;*a{ehr|OI=
zuS&+_4>Yc?HGU`K5&a9fq=k=RYmU51%8AX|0j?M1qSn0y97`dq(Cw&^2=pWCz{wbw
zh$rMb=^KU#sbEVdmXWzlyUyKjFxA%fwB@h0#+6saf;^(e%_8n%9t1Rdc_?p5xzies
z3M+4jVQLiiW)`!|7a?dmp69X~cq++-C<&;DCeZ0G0}`U(H?d%p2*=}}xfEK*R(y%7
zqSw12dGhpz7SvV{($6l#zK4MC%k!)ZwW;9`Tt>^b07!g?A&g?W&aT}&)516Q%~GDD
z+a}Qi`1VNWP+b8A%N1x~9{IHa-|%J}Uynj~QI-Zd=aV)kZ#ReKEJ-E1g*$_Legc=I
z#mpjy#C0fB1p~K(gD9vr25hDGos)DX>BHaqL
z9c^@84QBq@3NYIv3haIr`Vzx3>2`aOmqC$HIN+YQh>HvnVy~B>gC^rsSjU{06j_Bi|2aCUlbVnrSo{!m>At
zuhU6v!((!Hv`Zn*Xbqet)dPtm$r`9()HF!+(6U?J|G*TEDjon|QV*ETroQG9RBm#$
zu_cBsier_OMD3|^2DHBRh#C5?Lg={iiom@dg@ak_UQiQv!_mj;vb(t5CVgkZ6BAl;
zMElb?nK)U;$7HJDCRhq97nDojXJ)s)Mn$v527_H7%9dY(StQ_^;s-|Lsi?ryf@;;c;OX{FfO@+>c$S-6(xUqQ1YSrHHFf0|#IQ7TK5tpY}
z$_@DI=%77c5L4bqOz8$AozN0f&SkCQErh|ZmNRjvjF;(JgE;k*#3;6J79TztfUKQJ))QVZH~+%v%OV>GUyZ%sLngl6g+de
zn|l%txF3zxX2ph~py1+A8}m(EN(4ItN=%gqLNWI#7j#rA9aM*i51aXBs3bOA=gyGV
zCFAOzFq27U${=?IRt(7_WbsnGC2qOLsC9isrD0cI5f|>SiPCHp$vM+++hz&Q0uHC
zd(Mxe+hyrhpHm*F54IK0=kaCr=gFf^6b;nRi`)D#8?6=sVam0VwNVm>CDUnmTwZFDObF*G>n=f<~
zX$NR=0-H-GL(8aL4xKyu_Xuu{!U&SwHh~gkhnqO0q9G{bxCf$K;%lvpcInw`=uSl1
z_rZt~EGpF>;HX-D*t~Y-6-l~0%H=jUi}VgaL{mp3OnK!(*<~d
z<1m(Fh7Bqngh8KSfW-DK@1OzL3-igmn?_61DzQ^g`BQmC_LL=Cq*ny(l!kB`Xtna2
zYwdH_;vUFV45xeyZ|<9%#-1#^mkj9Z-mGhV71lBsisz3j-l{hXTF$_Bv&nz{$A4nZ
z?fz2KIm0T|6Y^@Id$UQUdoxCh36Ta%>Ky=sC?M0l0Dy#^dsN?@8_h1ACR5~dpwmUB
zobM8JpX)@8dtNS~4^4+lXqr&!S})N68M60MUM^n%!Tgc%1U!XI!|
z!@v9s`!%U9)7ryYT8NQ|V}W)|sbSnU?H6CZiYvQF`1Cg94{L0-i)a)(VK_lbqaRWE
z+?J)n7t(U58~&Cc9|MRW0VxhQXE8{6Au@w1m%Iwl0%ARsJc6*SK`!V)@S?yh=cL5y
zNJNuoN#EqDH9c&YZWvDE&K6@Xtp-j3ZhavuatJSD`e41CFqLZX93^WP
z0k$mU&otoXs2v$o&!Cs~A5F@>4#MkLo7$%rjO9b8dZk&Ul8{Nm(>@O@VI}r?sKD_k
zmfUWy7j*P-7R!AB7ld#oi^xRYLUYkx-wCU?s#c0t=~lj6P&yz}Jt77PG*c*|z*)2#
z^Ye=O92|`->!psSv+yptv37qp%vdt!tUrRR^;NZTV|`UHl)68{CZRgdPQw=obDruwXVS{lf-9XF^FArdfleP`
zASPOG=b?@7kO_3(o3vk8(;xUCl3fU?2bd$lD}{DV7tO|6ZD0Hj0jr!pY1Vyp5V`V-
z)IfEASS{K?50^k08w#hN=CGri>iZzo5sC&wTWm;>`I((jfH8t+QdKGjrWIaPYAdN(
zo;k?57mPxDa#vl8*DOY=aj3|878G;_@X=mYBZ=yn!
z1)Qn-!*%OL#JjCni3*2CKEvz@qmUuhOII5xswd$r8@)E{4#obkmrSXA=Tt}5o=zd(
zoWVe|Be5Y5v4oiZqKVm=gMl$~4Weh9$&wb)_LZVE+p91+Yt@lIT$hcg54)75vY2v~
znIPx^4w0)KN6<4(W>cpN|4z~jDA;`M
zKZQ9M;eiyLeoa!^dr8Y#_m-}#r|HV1|MAa1vd0C!nY2o9
z$Q2rBunq%5y#M}B{0pKht@@0vD!mb70zW*XdwxMl^KD4Hb$>wbMv;zPhV8RUERb}|
zc07uq&F7j!)J{fO*!>K$3a>s*ewRO-b0;vXC#tE9(?W1TTnxKlkcJoajF&K+&e)8@
zb)_X}hvlIBpz6vSsMb0xMKn#b`6t4
z1sh>f13AEbRH>S@qe{DI#CLgk_&S|h6gln?^k2a38ETqXkaZbf2Our<2N-lj0D>L7
z5aufHzJL{j-3jQ0s{j&=N$p3;EEFWnAdmDYwDE8{MopTxv_$htrguH!Y3F>1R3}5@
z`$HO<<;<*A^VKil$}3V$`4pJ0*9!pPy-xSE`u-<4Ft<{OOAFGB!JxX_EUcq_nKL=>
zRAOWr4D6XL-n$>bNQF%_NCX-AD3bsZXrrP^iqXoBdCy8K{g?38vGtkGW05O;j?kr&
z9EoWTiGF1z7#s@hwxmt{bv7o-gZLD8uCEuhf`&IU?XRQyaeL8r`)*6BQNa6*Xs<%i3H(d`a~WLnI{_bxDn7o9shDInT7`8HmRbYeUTb+PmALYX
z)Y=Vy*l(>-GzfUOyJK3JuWd~K5Xm8z((bY$k+z_D2LW|E;rWO%NdX7z(VpHN`a-#+
zMw;^0w$?0}`3!Oz8x@oPz+1>1&@DTRZ%W?!o@ARbw3U!;HlO2T
[GIT binary patch data omitted: several hundred base85-encoded lines carrying the raw contents of a binary file added by this commit; the encoded payload is not human-readable and is elided here for readability.]