update
@@ -66,10 +66,10 @@ Then go to `http://localhost:8501` in your browser to run TexTeller in the web.
We use [ray serve](https://github.com/ray-project/ray) to provide a simple API for using TexTeller in your own projects. To start the server, navigate to the `TexTeller/src` directory and run the following command:
```bash
-python serve.py # default settings
+python server.py # default settings
```
-You can pass the following arguments to the `serve.py` script to get custom inference settings (e.g. `python serve.py --use_gpu` to enable GPU inference):
+You can pass the following arguments to the `server.py` script to get custom inference settings (e.g. `python server.py --use_gpu` to enable GPU inference):
| Argument | Description |
| --- | --- |
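Not part of the diff, but as a usage sketch: starting the server and querying it from a second shell might look like the following. Only `--use_gpu` and the `img` form field (used by the client and server code further down) come from this commit; the port (ray serve's default 8000) and the `/predict` route are assumptions.

```bash
# Terminal 1: start the TexTeller API server with GPU inference enabled
python server.py --use_gpu

# Terminal 2: POST an image as multipart form data under the 'img' field.
# Port 8000 is ray serve's default; the /predict route is an assumption.
curl -F "img=@/path/to/formula.png" http://localhost:8000/predict
```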
@@ -7,3 +7,4 @@ ray[serve]
accelerate
tensorboardX
nltk
+python-multipart
@@ -7,8 +7,4 @@ with open(img_path, 'rb') as img:
    files = {'img': img}
    response = requests.post(url, files=files)

-# data = {"img_path": img_path}
-# response = requests.post(url, json=data)

print(response.text)
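Pieced together from this hunk, a self-contained version of the client snippet might look as follows. The `url` and `img_path` values are placeholders (port 8000 and the `/predict` route are assumptions); only the `img` field name and the multipart request shape come from the diff.

```python
import requests

# Placeholder endpoint: port 8000 is ray serve's default, /predict is an assumption.
url = "http://localhost:8000/predict"
img_path = "/path/to/formula.png"  # placeholder image path

# Upload the image as multipart form data under the 'img' field,
# matching the field the server reads from the request form.
with open(img_path, 'rb') as img:
    files = {'img': img}
    response = requests.post(url, files=files)

print(response.text)  # the predicted LaTeX returned by the server
```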
@@ -21,6 +21,7 @@ def inference(
    if isinstance(imgs_path[0], str):
        imgs = convert2rgb(imgs_path)
    else:  # already numpy array (rgb format)
        assert isinstance(imgs_path[0], np.ndarray)
        imgs = imgs_path
    imgs = inference_transform(imgs)
    pixel_values = torch.stack(imgs)
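This branch expects numpy inputs to already be in RGB order (per the comment in the hunk). Since OpenCV decodes images as BGR, a caller that builds the arrays itself would need a conversion along these lines (a minimal sketch; the image path is a placeholder):

```python
import cv2
import numpy as np

# cv2.imread returns a BGR array; the inference path above asserts the input
# is a numpy array and treats it as RGB, so convert before passing it in.
img_bgr = cv2.imread("/path/to/formula.png")        # placeholder path
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # HxWx3 uint8, RGB order
assert isinstance(img_rgb, np.ndarray)
```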
@@ -67,6 +67,7 @@ class Ingress:
        img_rb = await form['img'].read()

        img_nparray = np.frombuffer(img_rb, np.uint8)
        img_nparray = cv2.imdecode(img_nparray, cv2.IMREAD_COLOR)
        img_nparray = cv2.cvtColor(img_nparray, cv2.COLOR_BGR2RGB)
        pred = await self.texteller_server.predict.remote(img_nparray)
        return pred
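The hunk only shows the body of the request handler. As a rough sketch of the surrounding context, under the assumptions that the handler is the deployment's `__call__`, that it receives a Starlette `Request` (python-multipart, added to the requirements above, is what Starlette needs to parse multipart forms), and that `texteller_server` is a handle to the model deployment passed into the constructor, it might look like:

```python
import cv2
import numpy as np
from ray import serve
from starlette.requests import Request


@serve.deployment
class Ingress:
    def __init__(self, texteller_server):
        # Handle to the model deployment; its predict() is invoked remotely below.
        self.texteller_server = texteller_server

    async def __call__(self, request: Request) -> str:
        form = await request.form()        # multipart parsing needs python-multipart
        img_rb = await form['img'].read()  # raw bytes of the uploaded 'img' file

        # Decode the bytes into an RGB numpy array, mirroring the hunk above.
        img_nparray = np.frombuffer(img_rb, np.uint8)
        img_nparray = cv2.imdecode(img_nparray, cv2.IMREAD_COLOR)
        img_nparray = cv2.cvtColor(img_nparray, cv2.COLOR_BGR2RGB)

        pred = await self.texteller_server.predict.remote(img_nparray)
        return pred
```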