nazounoryuu committed on
Commit 9cf75ac · 1 Parent(s): 4ae1e06

update logic to use cached results or not

Files changed (2):
  1. .gitignore +1 -0
  2. app/main.py +8 -6
.gitignore CHANGED
@@ -10,4 +10,5 @@ temp/*
 models/*
 __pycache__
 .gradio
+.cache
 # **/examples/*
app/main.py CHANGED
@@ -7,7 +7,7 @@ PROJECT_DIR = Path(__file__).parent.parent
 sys.path.append(str(PROJECT_DIR))
 
 # Need this to be able to write cache on HF Space
-HF_HOME = "./cache/huggingface"
+HF_HOME = ".cache/huggingface"
 HF_MODULES_CACHE = HF_HOME + "/modules"
 os.environ["HF_HOME"] = HF_HOME
 os.environ["HF_MODULES_CACHE"] = HF_MODULES_CACHE
@@ -135,6 +135,8 @@ def run_htr_pipeline(
 
 # Cache result from previous run
 cache_path = Path(OUTPUT_CACHE_DIR) / Path(image_name).with_suffix(".json")
+
+print(use_cache)
 
 if use_cache and cache_path.exists():
     progress(0.5, desc="Cache found, loading cache...")
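The surrounding load-or-compute logic, sketched below with hypothetical stand-ins (`OUTPUT_CACHE_DIR`'s value, `run_pipeline`, and the result schema are my assumptions, not the Space's actual code): a previous run's JSON output is reused when `use_cache` is truthy and the file exists; otherwise the pipeline runs from scratch and its result is written back.

```python
import json
from pathlib import Path

OUTPUT_CACHE_DIR = ".cache/outputs"  # assumed location, for illustration only

def run_pipeline(image_name: str) -> dict:
    # Stand-in for the real line-detection + text-recognition pipeline.
    return {"image": image_name, "lines": []}

def transcribe(image_name: str, use_cache: bool) -> dict:
    cache_path = Path(OUTPUT_CACHE_DIR) / Path(image_name).with_suffix(".json")
    if use_cache and cache_path.exists():
        return json.loads(cache_path.read_text())  # reuse the previous result
    result = run_pipeline(image_name)              # slow CPU inference
    cache_path.parent.mkdir(parents=True, exist_ok=True)
    cache_path.write_text(json.dumps(result))      # cache for the next run
    return result
```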
@@ -187,7 +189,7 @@ with gr.Blocks(title="submit") as submit:
     container=False,
 )
 # device = gr.Dropdown(choices=["cpu", "cuda"], label="Device", value="cpu", interactive=True)
-use_cache = gr.Radio(["Yes", "No"], label="Use cached result?", value="Yes", interactive=True)
+use_cache = gr.Radio([True, False], label="Use cached result", value=True, interactive=True)
 run_btn = gr.Button("Transcribe")
 
 # Output tab
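The switch from `["Yes", "No"]` to `[True, False]` matters because Gradio passes the selected choice to the event handler as-is: the non-empty string `"No"` is truthy in Python, so `if use_cache and cache_path.exists():` would have taken the cache branch regardless of the selection. A minimal sketch of the corrected behavior (assuming a Gradio version that accepts non-string choices):

```python
import gradio as gr

def show(use_cache):
    # With boolean choices the handler receives a real bool,
    # so truthiness checks behave as intended.
    return f"use_cache={use_cache!r} ({type(use_cache).__name__})"

with gr.Blocks() as demo:
    use_cache = gr.Radio([True, False], label="Use cached result", value=True)
    out = gr.Textbox(label="Value received by the handler")
    use_cache.change(show, inputs=use_cache, outputs=out)

# demo.launch()  # uncomment to run locally
```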
@@ -228,11 +230,11 @@ with gr.Blocks(
 1. Detect text lines from the page image
 2. Perform text recognition on detected lines
 
-This space does not have access to GPU and default to using CPU.
-Inference from scratch will be extremely slow, so I cached example results to disk. Some notes:
+This space does not have access to a GPU.
+Inference on CPU will be extremely slow, so I cached example results to disk. Some notes:
 
-- To view example outputs, select one image from the examples, and choose "Yes" under **Used cached result**.
-To transcribe an example from scratch, choose "No".
+- To view example outputs, select one image from the examples, and choose **Use cached result: True**.
+To transcribe an example from scratch, choose **False**.
 - New images uploaded will be transcribed from scratch.
 """)