|
| 1 | +import io |
| 2 | +import json |
| 3 | +import os |
| 4 | +import zipfile |
| 5 | + |
| 6 | +import lokalise |
| 7 | +import numpy as np |
| 8 | +import pandas as pd |
| 9 | +import requests |
| 10 | +import streamlit as st |
| 11 | +from dotenv import load_dotenv |
| 12 | + |
# Load environment variables from a local .env file (if present) so the
# Lokalise credentials below need not be hardcoded.
load_dotenv()

# Lokalise API credentials; each is None when the variable is unset —
# NOTE(review): LokaliseTranslator below will fail with a None api_key,
# confirm deployment always provides both variables.
LOKALISE_API_KEY = os.getenv("LOKALISE_API_KEY")
LOKALISE_PROJECT_ID = os.getenv("LOKALISE_PROJECT_ID")
# Language code naming the subdirectory read from the downloaded bundle.
LANGUAGE = "nl"
| 17 | + |
| 18 | + |
class LokaliseTranslator:
    """Fetch translations for one language from a Lokalise project and
    serve them via simple key lookup.

    The translation bundle is downloaded once at construction time; every
    later lookup is a local dictionary read.
    """

    def __init__(self, api_key: str, project_id: str, language: str) -> None:
        """
        Args:
            api_key: Lokalise API token.
            project_id: Lokalise project identifier.
            language: Language code (e.g. "nl") naming the bundle
                subdirectory to read the JSON file from.
        """
        self.client = lokalise.Client(api_key)
        self.project_id = project_id
        self.language = language
        # Eager fetch so __call__ never does I/O.
        self.translations = self.get_translations()

    def get_translations(self) -> dict[str, str]:
        """Download the project's translation bundle and return the
        key -> translated-string mapping for ``self.language``.

        Raises:
            requests.HTTPError: if the bundle download returns an HTTP error.
            KeyError: if the expected JSON file is absent from the bundle.
        """
        response = self.client.download_files(
            self.project_id,
            {"format": "json", "original_filenames": True, "replace_breaks": False},
        )
        translations_url = response["bundle_url"]

        # Download the ZIP bundle. The timeout keeps the app from hanging
        # forever on a stalled connection, and raise_for_status surfaces
        # HTTP errors instead of handing an error page to zipfile.
        zip_response = requests.get(translations_url, timeout=30)
        zip_response.raise_for_status()

        # Extract the JSON file for the selected language; the context
        # managers close both the archive and the member file (the
        # original leaked the open ZipFile).
        json_filename = f"{self.language}/no_filename.json"
        with zipfile.ZipFile(io.BytesIO(zip_response.content)) as zip_file:
            with zip_file.open(json_filename) as json_file:
                return json.load(json_file)

    def __call__(self, key: str) -> str:
        """Return the translation for ``key``, or ``key`` itself if absent."""
        return self.translations.get(key, key)
| 44 | + |
| 45 | + |
# Module-level singleton: constructed at import time, so the app performs a
# network round-trip to Lokalise on every cold start.
translator = LokaliseTranslator(LOKALISE_API_KEY, LOKALISE_PROJECT_ID, LANGUAGE)

st.title(translator("dashboard_title"))

# Column holding pickup timestamps in the dataset (names are lowercased by
# load_data below, so this constant is lowercase).
DATE_COLUMN = "date/time"
# Public sample dataset: Uber NYC pickups, September 2014 (gzipped CSV).
DATA_URL = (
    "https://s3-us-west-2.amazonaws.com/streamlit-demo-data/uber-raw-data-sep14.csv.gz"
)
| 54 | + |
| 55 | + |
@st.cache_data
def load_data(nrows):
    """Load the first ``nrows`` rows of the Uber pickups dataset.

    Column names are lowercased and the date/time column is parsed to
    datetimes. Streamlit caches the result, so reruns skip the download.
    """
    frame = pd.read_csv(DATA_URL, nrows=nrows)
    frame = frame.rename(columns=lambda name: str(name).lower())
    frame[DATE_COLUMN] = pd.to_datetime(frame[DATE_COLUMN])
    return frame
| 62 | + |
| 63 | + |
# Placeholder text element: shows a loading message, then is overwritten in
# place once the (possibly cached) data is available.
data_load_state = st.text(translator("loading_data"))
data = load_data(10000)
data_load_state.text(translator("done"))

# Optional raw-data table, hidden behind a checkbox to keep the page light.
if st.checkbox(translator("show_raw_data")):
    st.subheader(translator("raw_data"))
    st.write(data)

# Histogram of pickup counts per hour of day (24 one-hour bins, 0-23).
st.subheader(translator("nb_pickups_hour"))
hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0, 24))[0]
st.bar_chart(hist_values)

# Hour-of-day filter in the range 0-23, defaulting to 17 (5 PM).
hour_to_filter = st.slider("hour", 0, 23, 17)
filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]

# NOTE(review): %-formatting assumes the "map_all_pickups" translation
# contains a %-style placeholder; if the key is missing, translator returns
# the raw key and the "%" operator raises — confirm the key always exists.
st.subheader(translator("map_all_pickups") % hour_to_filter)
st.map(filtered_data)
0 commit comments