Initial commit of akmon project

This commit is contained in:
2026-01-20 08:04:15 +08:00
commit 77a2bab985
1309 changed files with 343305 additions and 0 deletions

123
doc_chat/alter_policies.sql Normal file
View File

@@ -0,0 +1,123 @@
-- Migration: widen chat_messages content types, add ingress tracking, and
-- rebuild insert/participant policies without recursive RLS evaluation.
begin;
-- Allow chat messages to store new audio payloads.
-- Dropping and re-adding the check re-validates existing rows against the
-- widened value set ('audio' is the new member).
alter table chat_messages
drop constraint if exists chat_messages_content_type_check;
alter table chat_messages
add constraint chat_messages_content_type_check
check (content_type in ('text', 'markdown', 'json', 'image', 'file', 'audio'));
-- Track the ingress source for every message.
-- NOTE(review): the inline check gets an auto-generated constraint name; a
-- named table-level constraint would be easier to drop in later migrations.
alter table chat_messages
add column if not exists ingress_type text not null default 'manual'
check (ingress_type in ('manual', 'bluetooth', 'school_gateway', 'system'));
comment on column chat_messages.ingress_type is
'Source of message ingestion: manual app entry, bluetooth wearable sync, school gateway, or system automation.';
-- Refresh insert policies so authenticated users and gateways follow the new ingress contract.
-- Drop the legacy single policy plus any prior versions of the split pair.
drop policy if exists chat_messages_insert on chat_messages;
drop policy if exists chat_messages_insert_self on chat_messages;
drop policy if exists chat_messages_insert_gateway on chat_messages;
-- Self-service inserts: sender must be the authenticated user, must be a
-- participant of the conversation, and may only use app-originated ingress types.
create policy chat_messages_insert_self on chat_messages
for insert
to authenticated
with check (
chat_messages.sender_id = auth.uid()
and chat_messages.ingress_type in ('manual', 'bluetooth')
and exists (
select 1
from chat_participants p
where p.conversation_id = chat_messages.conversation_id
and p.user_id = auth.uid()
)
);
-- Gateway/system inserts: gated on the app_role JWT claim instead of sender
-- identity, and restricted to automated ingress types.
create policy chat_messages_insert_gateway on chat_messages
for insert
to authenticated
with check (
coalesce((current_setting('request.jwt.claims', true)::jsonb ->> 'app_role'), '') in ('gateway_ingress', 'system_bot')
and chat_messages.ingress_type in ('school_gateway', 'system')
);
-- Fix chat participant policies to avoid recursive RLS evaluation.
-- security definer lets the function read chat_participants without
-- re-entering that table's own policies (which would recurse).
create or replace function public.chat_user_is_manager(p_conversation_id uuid, p_user_id uuid)
returns boolean
language sql
security definer
set search_path = public
as $$
select exists (
select 1
from chat_participants
where conversation_id = p_conversation_id
and user_id = p_user_id
and role in ('owner', 'admin')
);
$$;
-- Lock execution of the helper down to authenticated users only.
revoke all on function public.chat_user_is_manager(uuid, uuid) from public;
grant execute on function public.chat_user_is_manager(uuid, uuid) to authenticated;
drop policy if exists chat_conversations_update on chat_conversations;
-- Only owners/admins of a conversation may update it.
create policy chat_conversations_update on chat_conversations for update to authenticated using (
public.chat_user_is_manager(chat_conversations.id, auth.uid())
) with check (
public.chat_user_is_manager(chat_conversations.id, auth.uid())
);
-- Participant policies: a user sees/edits their own row; owners/admins manage
-- any row in their conversation (via the non-recursive helper above).
drop policy if exists chat_participants_select on chat_participants;
drop policy if exists chat_participants_update on chat_participants;
drop policy if exists chat_participants_delete on chat_participants;
drop policy if exists chat_participants_insert on chat_participants;
create policy chat_participants_select on chat_participants for select to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_update on chat_participants for update to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
) with check (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_delete on chat_participants for delete to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_insert on chat_participants for insert to authenticated with check (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
-- Ensure downlink queue has optional metadata payload for automation hints.
alter table chat_mqtt_downlinks
add column if not exists metadata jsonb;
commit;
-- Standalone, idempotent re-application of the chat_participants policy set.
-- It also clears the legacy catch-all policy (chat_participants_all).
-- FIX: these statements previously ran *after* the commit above with no
-- transaction of their own; wrap them so the drop/create pairs apply
-- atomically and a mid-script failure cannot leave the table with policies
-- half-dropped (rows silently inaccessible under RLS).
begin;
drop policy if exists chat_participants_all on chat_participants;
drop policy if exists chat_participants_select on chat_participants;
drop policy if exists chat_participants_update on chat_participants;
drop policy if exists chat_participants_delete on chat_participants;
drop policy if exists chat_participants_insert on chat_participants;
-- Members can read their own membership row; owners/admins can read every row
-- of their conversation (via the non-recursive security definer helper).
create policy chat_participants_select on chat_participants for select to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_update on chat_participants for update to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
) with check (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_delete on chat_participants for delete to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
-- Users may add themselves; owners/admins may add anyone to their conversation.
create policy chat_participants_insert on chat_participants for insert to authenticated with check (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
commit;

View File

@@ -0,0 +1,318 @@
-- Chat schema for 1:1 and group conversations (Supabase-friendly)
-- Tables: chat_conversations, chat_participants, chat_messages, chat_notifications
-- A conversation thread; is_group = false means a 1:1 chat.
create table if not exists chat_conversations (
id uuid primary key default gen_random_uuid(),
title text,
is_group boolean not null default false,
-- creator; nullable so the thread survives deletion of the owning account
owner_id uuid references public.ak_users(id) on delete set null,
-- denormalized timestamp maintained by the chat_on_message_insert() trigger
last_message_at timestamptz,
metadata jsonb,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);
-- Membership rows: exactly one per (conversation, user); role drives the
-- owner/admin management policies defined later in this file.
create table if not exists chat_participants (
id uuid primary key default gen_random_uuid(),
conversation_id uuid not null references chat_conversations(id) on delete cascade,
user_id uuid not null references public.ak_users(id) on delete cascade,
role text not null default 'member' check (role in ('owner','admin','member')),
joined_at timestamptz not null default now(),
-- per-user read cursor; null means the user has never read the thread
last_read_at timestamptz,
is_muted boolean not null default false,
settings jsonb,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now(),
unique(conversation_id, user_id)
);
-- Individual messages; content_type constrains the payload interpretation.
create table if not exists chat_messages (
id uuid primary key default gen_random_uuid(),
conversation_id uuid not null references chat_conversations(id) on delete cascade,
sender_id uuid not null references public.ak_users(id) on delete cascade,
content text not null,
content_type text not null default 'text' check (content_type in ('text','markdown','json','image','file','audio')),
-- threaded reply pointer; detached (set null) if the parent is deleted
reply_to uuid references chat_messages(id) on delete set null,
metadata jsonb,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);
-- Hot-path indexes for message history and participant lookups.
create index if not exists idx_chat_messages_conv_time on chat_messages(conversation_id, created_at desc);
create index if not exists idx_chat_participants_conv on chat_participants(conversation_id);
create index if not exists idx_chat_participants_user on chat_participants(user_id);
-- Per-user notification fan-out rows; one row per recipient is inserted by
-- the chat_on_message_insert() trigger defined later in this file.
create table if not exists chat_notifications (
id uuid primary key default gen_random_uuid(),
user_id uuid not null references public.ak_users(id) on delete cascade,
conversation_id uuid references chat_conversations(id) on delete cascade,
message_id uuid references chat_messages(id) on delete cascade,
type text not null default 'message',
is_read boolean not null default false,
created_at timestamptz not null default now()
);
-- BUG FIX: this index was previously created *before* the chat_notifications
-- table existed, which makes the whole script fail on a fresh database; it
-- must follow the CREATE TABLE above.
create index if not exists idx_chat_notifications_user_time on chat_notifications(user_id, created_at desc);
-- updated_at triggers
-- Generic BEFORE UPDATE hook: stamps updated_at on every modified row.
create or replace function public.set_updated_at()
returns trigger language plpgsql as $$
begin
new.updated_at = now();
return new;
end $$;
drop trigger if exists trg_chat_conversations_updated on chat_conversations;
create trigger trg_chat_conversations_updated before update on chat_conversations for each row execute function public.set_updated_at();
drop trigger if exists trg_chat_participants_updated on chat_participants;
create trigger trg_chat_participants_updated before update on chat_participants for each row execute function public.set_updated_at();
drop trigger if exists trg_chat_messages_updated on chat_messages;
create trigger trg_chat_messages_updated before update on chat_messages for each row execute function public.set_updated_at();
-- On new message: update conversation last_message_at and create notifications for participants except sender
create or replace function public.chat_on_message_insert()
returns trigger language plpgsql as $$
begin
-- update last_message_at
-- (this also fires trg_chat_conversations_updated, refreshing updated_at)
update chat_conversations set last_message_at = new.created_at where id = new.conversation_id;
-- insert notifications for all participants except sender
-- one chat_notifications row per recipient; read state is tracked per user
insert into chat_notifications(user_id, conversation_id, message_id, type, is_read, created_at)
select p.user_id, new.conversation_id, new.id, 'message', false, now()
from chat_participants p
where p.conversation_id = new.conversation_id
and p.user_id <> new.sender_id;
return new;
end $$;
drop trigger if exists trg_chat_messages_after_insert on chat_messages;
create trigger trg_chat_messages_after_insert
after insert on chat_messages
for each row execute function public.chat_on_message_insert();
-- RLS (simplified)
alter table chat_conversations enable row level security;
alter table chat_participants enable row level security;
alter table chat_messages enable row level security;
alter table chat_notifications enable row level security;
-- Helper to check whether a user is owner/admin of a conversation without triggering RLS recursion
-- security definer: runs with the owner's rights so it can read
-- chat_participants without re-entering that table's own policies.
-- FIX: declared STABLE — the function only reads data, and without a
-- volatility marking Postgres assumes VOLATILE, preventing the planner from
-- caching the result when the policies below call it for many rows.
create or replace function public.chat_user_is_manager(p_conversation_id uuid, p_user_id uuid)
returns boolean
language sql
stable
security definer
set search_path = public
as $$
select exists (
select 1
from chat_participants
where conversation_id = p_conversation_id
and user_id = p_user_id
and role in ('owner', 'admin')
);
$$;
-- Only authenticated users may call the helper.
revoke all on function public.chat_user_is_manager(uuid, uuid) from public;
grant execute on function public.chat_user_is_manager(uuid, uuid) to authenticated;
-- Policies: users can access conversations they participate in
create policy chat_conversations_select on chat_conversations for select to authenticated using (
exists (select 1 from chat_participants p where p.conversation_id = chat_conversations.id and p.user_id = auth.uid())
);
-- Any authenticated user may open a conversation.
-- NOTE(review): with check (true) means the creator is not forced to add
-- themselves as a participant in the same transaction — presumably the app
-- layer does this; confirm against the client code.
create policy chat_conversations_insert on chat_conversations for insert to authenticated with check (true);
-- Only owners/admins (via the non-recursive helper) may update a conversation.
create policy chat_conversations_update on chat_conversations for update to authenticated using (
public.chat_user_is_manager(chat_conversations.id, auth.uid())
) with check (
public.chat_user_is_manager(chat_conversations.id, auth.uid())
);
-- Split policies for clarity: SELECT/UPDATE/DELETE cover members and admins; INSERT allows admins to add others
drop policy if exists chat_participants_all on chat_participants;
create policy chat_participants_select on chat_participants for select to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_update on chat_participants for update to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
) with check (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_delete on chat_participants for delete to authenticated using (
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
create policy chat_participants_insert on chat_participants for insert to authenticated with check (
-- allow user to add self OR owners/admins to add others
user_id = auth.uid() or public.chat_user_is_manager(chat_participants.conversation_id, auth.uid())
);
-- Messages: readable/writable only by conversation participants.
create policy chat_messages_select on chat_messages for select to authenticated using (
exists (select 1 from chat_participants p where p.conversation_id = chat_messages.conversation_id and p.user_id = auth.uid())
);
create policy chat_messages_insert on chat_messages for insert to authenticated with check (
exists (select 1 from chat_participants p where p.conversation_id = chat_messages.conversation_id and p.user_id = auth.uid())
);
-- Notifications: each user sees and updates (e.g. marks read) only their own.
create policy chat_notifications_select on chat_notifications for select to authenticated using (user_id = auth.uid());
create policy chat_notifications_update on chat_notifications for update to authenticated using (user_id = auth.uid()) with check (user_id = auth.uid());
-- ---------------------------------------------------------------------------
-- Chat MQTT downlinks: queue for device/edge commands tied to conversations
-- ---------------------------------------------------------------------------
create table if not exists chat_mqtt_downlinks (
id uuid primary key default gen_random_uuid(),
conversation_id uuid not null references chat_conversations(id) on delete cascade,
-- optional: specific participant user as target (used to map a device/user)
target_user_id uuid references public.ak_users(id) on delete set null,
-- optional: explicit downlink topic; when absent the gateway derives it by
-- convention (e.g. device/{user_id}/down)
topic text,
-- payload and its encoding (utf8/base64/json)
payload text not null,
payload_encoding text not null default 'utf8' check (payload_encoding in ('utf8','base64','json')),
qos smallint not null default 1 check (qos in (0,1,2)),
retain boolean not null default false,
-- delivery lifecycle state
status text not null default 'pending' check (status in ('pending','sent','acked','failed','expired','canceled')),
scheduled_at timestamptz not null default now(),
expires_at timestamptz,
sent_at timestamptz,
ack_at timestamptz,
retry_count integer not null default 0,
last_error text,
metadata jsonb,
-- correlation / audit
correlation_id uuid,
created_by uuid not null references public.ak_users(id) on delete cascade,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);
-- Worker queue scan: pending rows by conversation in scheduled order.
create index if not exists idx_chat_mqtt_downlinks_conv_status_time on chat_mqtt_downlinks(conversation_id, status, scheduled_at asc);
create index if not exists idx_chat_mqtt_downlinks_creator_time on chat_mqtt_downlinks(created_by, created_at desc);
create index if not exists idx_chat_mqtt_downlinks_topic on chat_mqtt_downlinks(topic);
drop trigger if exists trg_chat_mqtt_downlinks_updated on chat_mqtt_downlinks;
create trigger trg_chat_mqtt_downlinks_updated before update on chat_mqtt_downlinks for each row execute function public.set_updated_at();
alter table chat_mqtt_downlinks enable row level security;
-- Access control for the downlink queue:
-- 1) conversation participants may read the conversation's downlink records;
-- 2) insert: must be a conversation participant, and created_by = auth.uid();
-- 3) update/delete: the record creator, or a conversation owner/admin
--    (used for retry, cancel, marking ACK, etc.)
create policy chat_mqtt_downlinks_select on chat_mqtt_downlinks for select to authenticated using (
exists (
select 1 from chat_participants p
where p.conversation_id = chat_mqtt_downlinks.conversation_id
and p.user_id = auth.uid()
)
);
create policy chat_mqtt_downlinks_insert on chat_mqtt_downlinks for insert to authenticated with check (
created_by = auth.uid() and exists (
select 1 from chat_participants p
where p.conversation_id = chat_mqtt_downlinks.conversation_id
and p.user_id = auth.uid()
)
);
create policy chat_mqtt_downlinks_update on chat_mqtt_downlinks for update to authenticated using (
created_by = auth.uid() or exists (
select 1 from chat_participants p
where p.conversation_id = chat_mqtt_downlinks.conversation_id
and p.user_id = auth.uid()
and p.role in ('owner','admin')
)
) with check (
created_by = auth.uid() or exists (
select 1 from chat_participants p
where p.conversation_id = chat_mqtt_downlinks.conversation_id
and p.user_id = auth.uid()
and p.role in ('owner','admin')
)
);
create policy chat_mqtt_downlinks_delete on chat_mqtt_downlinks for delete to authenticated using (
created_by = auth.uid() or exists (
select 1 from chat_participants p
where p.conversation_id = chat_mqtt_downlinks.conversation_id
and p.user_id = auth.uid()
and p.role in ('owner','admin')
)
);
-- optional: pending-delivery view for backend workers / reporting
-- NOTE(review): a plain view runs with its owner's privileges and can bypass
-- the table's RLS for non-owner callers; if this view is exposed to clients,
-- consider `with (security_invoker = true)` (PostgreSQL 15+) — confirm the
-- intended audience.
create or replace view chat_mqtt_downlinks_pending as
select * from chat_mqtt_downlinks where status = 'pending' and scheduled_at <= now();
-- ---------------------------------------------------------------------------
-- Gateway reporting (chat_gateway_nodes + chat_gateway_heartbeats)
-- ---------------------------------------------------------------------------
-- Nodes registry: one row per running gateway clientId
create table if not exists chat_gateway_nodes (
id uuid primary key default gen_random_uuid(),
name text not null,
-- unique MQTT clientId identifies a gateway process
mqtt_client_id text not null unique,
version text,
region text,
tags jsonb,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);
drop trigger if exists trg_chat_gateway_nodes_updated on chat_gateway_nodes;
create trigger trg_chat_gateway_nodes_updated
before update on chat_gateway_nodes
for each row execute function public.set_updated_at();
alter table chat_gateway_nodes enable row level security;
-- Read-all for authenticated (dashboard/ops)
-- NOTE(review): no insert/update policies are defined here, so client-side
-- writes are blocked by RLS; presumably the gateway writes via the service
-- role, which bypasses RLS — confirm.
create policy chat_gateway_nodes_select on chat_gateway_nodes
for select to authenticated using (true);
-- Heartbeats: periodic metrics from gateway
create table if not exists chat_gateway_heartbeats (
id uuid primary key default gen_random_uuid(),
gateway_id uuid not null references chat_gateway_nodes(id) on delete cascade,
created_at timestamptz not null default now(),
uptime_sec integer,
-- memory footprint, megabytes
mem_rss_mb integer,
heap_used_mb integer,
-- connectivity snapshot of the gateway's backends
mqtt_connected boolean,
kafka_connected boolean,
redis_connected boolean,
-- counters since process start (reset on restart)
msgs_in integer,
msgs_out integer,
msgs_dropped integer,
errors integer,
acl_denied integer,
kafka_produced integer,
extra jsonb
);
create index if not exists idx_chat_gateway_heartbeats_gateway_time
on chat_gateway_heartbeats(gateway_id, created_at desc);
alter table chat_gateway_heartbeats enable row level security;
-- Read-all for authenticated (dashboard/ops)
create policy chat_gateway_heartbeats_select on chat_gateway_heartbeats
for select to authenticated using (true);
-- Latest status per gateway (view)
-- DISTINCT ON keeps only the newest heartbeat per gateway_id, then joins in
-- the node metadata.
create or replace view chat_gateway_status_latest as
with last as (
select distinct on (gateway_id)
gateway_id, id as heartbeat_id, created_at, uptime_sec, mem_rss_mb,
heap_used_mb, mqtt_connected, kafka_connected, redis_connected,
msgs_in, msgs_out, msgs_dropped, errors, acl_denied, kafka_produced, extra
from chat_gateway_heartbeats
order by gateway_id, created_at desc
)
select l.*, n.name, n.mqtt_client_id, n.version, n.region, n.tags, n.updated_at as node_updated_at
from last l
join chat_gateway_nodes n on n.id = l.gateway_id;

View File

@@ -0,0 +1,151 @@
-- Gateway reporting tables for MQTT gateway nodes and periodic heartbeats
-- Requires Supabase/Postgres environment. Assumes helper is_admin(uid uuid) exists.
-- UUID generation
create extension if not exists pgcrypto;
-- 1) Gateway registry
create table if not exists public.gateway_nodes (
id uuid primary key default gen_random_uuid(),
name text,
-- unique MQTT clientId identifies a gateway process
mqtt_client_id text unique not null,
version text,
region text,
-- owning user; drives the per-owner RLS policies below
owner_user_id uuid references auth.users(id),
tags jsonb,
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);
create index if not exists idx_gateway_nodes_owner on public.gateway_nodes(owner_user_id);
-- Generic BEFORE UPDATE hook: stamps updated_at on every modified row.
create or replace function public.set_updated_at()
returns trigger language plpgsql as $$
begin
new.updated_at = now();
return new;
end;$$;
drop trigger if exists trg_gateway_nodes_updated_at on public.gateway_nodes;
create trigger trg_gateway_nodes_updated_at
before update on public.gateway_nodes
for each row execute function public.set_updated_at();
alter table public.gateway_nodes enable row level security;
-- RLS: owners and admins can read; only admins can write via client. Service role bypasses RLS.
-- The pg_policies lookups make policy creation idempotent (there is no
-- CREATE POLICY IF NOT EXISTS), so this script can be re-run safely.
do $$
begin
if not exists (
select 1 from pg_policies where schemaname='public' and tablename='gateway_nodes' and policyname='gateway_nodes_select') then
create policy gateway_nodes_select on public.gateway_nodes
for select using (
auth.uid() = owner_user_id or coalesce(public.is_admin(auth.uid()), false)
);
end if;
if not exists (
select 1 from pg_policies where schemaname='public' and tablename='gateway_nodes' and policyname='gateway_nodes_update_owner') then
create policy gateway_nodes_update_owner on public.gateway_nodes
for update using (
auth.uid() = owner_user_id or coalesce(public.is_admin(auth.uid()), false)
) with check (
auth.uid() = owner_user_id or coalesce(public.is_admin(auth.uid()), false)
);
end if;
if not exists (
select 1 from pg_policies where schemaname='public' and tablename='gateway_nodes' and policyname='gateway_nodes_insert_owner') then
create policy gateway_nodes_insert_owner on public.gateway_nodes
for insert with check (
auth.uid() = owner_user_id or coalesce(public.is_admin(auth.uid()), false)
);
end if;
end$$;
-- 2) Heartbeats (periodic runtime metrics). Use service role to insert.
create table if not exists public.gateway_heartbeats (
id uuid primary key default gen_random_uuid(),
gateway_id uuid not null references public.gateway_nodes(id) on delete cascade,
-- sample timestamp
ts timestamptz not null default now(),
uptime_sec integer,
-- memory footprint, megabytes
mem_rss_mb integer,
heap_used_mb integer,
-- connectivity snapshot of the gateway's backends
mqtt_connected boolean,
kafka_connected boolean,
redis_connected boolean,
-- counters (defaulted to 0, unlike the nullable chat_gateway_heartbeats twin)
msgs_in integer default 0,
msgs_out integer default 0,
msgs_dropped integer default 0,
errors integer default 0,
acl_denied integer default 0,
kafka_produced integer default 0,
extra jsonb
);
create index if not exists idx_gateway_heartbeats_gid_ts on public.gateway_heartbeats(gateway_id, ts desc);
alter table public.gateway_heartbeats enable row level security;
-- Idempotent policy creation (no CREATE POLICY IF NOT EXISTS in Postgres).
-- Readable by the gateway's owner or an admin; inserts happen via the
-- service role which bypasses RLS (see comment above).
do $$
begin
if not exists (
select 1 from pg_policies where schemaname='public' and tablename='gateway_heartbeats' and policyname='gateway_heartbeats_select') then
create policy gateway_heartbeats_select on public.gateway_heartbeats
for select using (
exists (
select 1 from public.gateway_nodes g
where g.id = gateway_id
and (g.owner_user_id = auth.uid() or coalesce(public.is_admin(auth.uid()), false))
)
);
end if;
end$$;
-- 3) Latest status view per gateway
-- DISTINCT ON (g.id) with the ts DESC ordering keeps the newest heartbeat per
-- gateway; LEFT JOIN keeps gateways that have no heartbeats yet (null metrics).
create or replace view public.gateway_status_latest as
select distinct on (g.id)
g.id as gateway_id,
g.name,
g.mqtt_client_id,
g.version,
g.region,
h.ts as last_ts,
h.uptime_sec,
h.mem_rss_mb,
h.heap_used_mb,
h.mqtt_connected,
h.kafka_connected,
h.redis_connected,
h.msgs_in,
h.msgs_out,
h.msgs_dropped,
h.errors,
h.acl_denied,
h.kafka_produced
from public.gateway_nodes g
left join public.gateway_heartbeats h
on h.gateway_id = g.id
order by g.id, h.ts desc;
-- 4) Daily rollup (materialized view). Refresh nightly via scheduler.
create materialized view if not exists public.gateway_daily_stats as
select
date_trunc('day', h.ts)::date as day,
h.gateway_id,
sum(h.msgs_in) as msgs_in,
sum(h.msgs_out) as msgs_out,
sum(h.msgs_dropped) as msgs_dropped,
sum(h.errors) as errors,
sum(h.acl_denied) as acl_denied,
sum(h.kafka_produced) as kafka_produced,
avg(h.mem_rss_mb) as avg_mem_rss_mb,
avg(h.heap_used_mb) as avg_heap_used_mb
from public.gateway_heartbeats h
-- explicit grouping expressions instead of positional "group by 1, 2"
group by date_trunc('day', h.ts)::date, h.gateway_id
with no data;
-- FIX: REFRESH MATERIALIZED VIEW CONCURRENTLY (the scheduled refresh below)
-- requires a UNIQUE index on the materialized view. (day, gateway_id) is the
-- grouping key, so it is unique per row and safe to enforce here.
create unique index if not exists idx_gateway_daily_stats on public.gateway_daily_stats(day, gateway_id);
-- Note: the first refresh must be non-concurrent because the view is created
-- WITH NO DATA (CONCURRENTLY requires an already-populated view):
--   refresh materialized view public.gateway_daily_stats;
-- Then schedule periodic concurrent refreshes:
-- select cron.schedule('refresh_gateway_daily_stats', '0 3 * * *', $$
-- refresh materialized view concurrently public.gateway_daily_stats;
-- $$);

View File

@@ -0,0 +1,73 @@
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and is
# ignored by current docker compose; safe to remove in a follow-up.
version: '3.8'
services:
# Single-node KRaft (broker+controller) Kafka using Confluent image
kafka:
image: confluentinc/cp-kafka:8.0.0
ports:
- "9092:9092" # internal clients inside docker network can use kafka:9092
- "9050:29092" # host clients can use host:9050 (mapped from container 29092). Use any allowed host port 9050-9059.
environment:
# KRaft combined node
KAFKA_NODE_ID: "1"
KAFKA_PROCESS_ROLES: "broker,controller"
# fixed cluster id (keeps data directory compatible across restarts for this single-node test)
KAFKA_CLUSTER_ID: "MkU3OEVBNTcwNTJENDM2Qk"
CLUSTER_ID: "MkU3OEVBNTcwNTJENDM2Qk"
KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
# listeners: internal plaintext for inter-broker, external SASL_PLAINTEXT for host access, controller for KRaft
KAFKA_LISTENERS: "INTERNAL://0.0.0.0:9092,EXTERNAL://0.0.0.0:29092,CONTROLLER://0.0.0.0:9093"
# Advertise the host IP so remote clients can reach the broker
# NOTE(review): EXTERNAL advertises localhost:9050 — remote (non-local) clients
# cannot reach the broker with this value; confirm whether a host IP is intended.
KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:9092,EXTERNAL://localhost:9050,CONTROLLER://kafka:9093"
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:SASL_PLAINTEXT,CONTROLLER:PLAINTEXT"
KAFKA_INTER_BROKER_LISTENER_NAME: "INTERNAL"
KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256,SCRAM-SHA-512"
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-512"
# single-node: replication factors must be 1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
# SASL credentials are read from the mounted JAAS file below
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf"
volumes:
- ./kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf:ro
restart: unless-stopped
redis:
image: redis:7-alpine
restart: unless-stopped
ports:
- "6579:6379"
volumes:
- ./redis/data:/data
# appendonly=yes enables AOF persistence into ./redis/data
command: ["redis-server", "--appendonly", "yes"]
# NOTE(review): clickhouse has no restart policy while its siblings use
# `unless-stopped`; consider adding one for consistency.
clickhouse:
image: clickhouse/clickhouse-server:latest
depends_on:
- kafka
ulimits:
nofile:
soft: 262144
hard: 262144
ports:
- "8123:8123" # HTTP
- "9000:9000" # Native
- "9009:9009" # TCP for interserver (optional)
volumes:
- ./clickhouse_data:/var/lib/clickhouse
# mount a users.d directory so we can set a default user's password/config
- ./clickhouse_config/users.d:/etc/clickhouse-server/users.d:ro
grafana:
image: grafana/grafana:latest
restart: unless-stopped
environment:
GF_INSTALL_PLUGINS: vertamedia-clickhouse-datasource,hamedkarbasi93-kafka-datasource
# Optional: set admin password via GF_SECURITY_ADMIN_PASSWORD
# SECURITY(review): admin password is hardcoded and committed to the repo;
# move it to an .env file or secret store and rotate the current value.
GF_SECURITY_ADMIN_PASSWORD: Admin_Hexiaoniu1
ports:
- "3011:3000"
volumes:
- ./grafana:/var/lib/grafana
- ./grafana/provisioning:/etc/grafana/provisioning:ro
depends_on:
- clickhouse

168
doc_chat/emqx配置.md Normal file
View File

@@ -0,0 +1,168 @@
# EMQX 配置与端口规划(配合当前微通信网关)
本文给出在 IP 119.146.131.237 上,以您提供的端口规划,如何配置 EMQX 以最小代价配合我们当前的功能(聊天上行 → Webhook 入库;设备下发 → 网关发布ACK 标记)。
## 端口/NAT 规划
- 外网 → 内部端口:
- 8105 → 1883(MQTT/TCP):移动端/设备、网关 MQTT 直连使用
- 8083 → 8083(MQTT over WebSocket/WS):Web 端非 TLS 调试
- 8084 → 8084(MQTT over WebSocket/WSS):Web 端 TLS(需证书)
- 8104 → 18083(EMQX Dashboard):运维面板(建议仅内网或白名单)
- 9560 → 3000(网关 HTTP Webhook):EMQX 推送 Webhook 到网关 `http://119.146.131.237:9560/webhooks/mqtt`
- 3306(MySQL):仅当使用 MySQL 作为 EMQX 的认证/ACL 后端时需要暴露;否则建议只内网放通
网关server/gateway-mqtt-node默认 HTTP Webhook 端口为 3000已在 .env 可改;若和 EMQX 不同机房,则通过 9560 做端口映射公开给 EMQX 调用。
## 网关 .env 对应配置
位于 `server/gateway-mqtt-node/.env`
```
MQTT_URL=mqtt://119.146.131.237:8105
HTTP_PORT=3000
WEBHOOK_TOKEN=<自定义强密码>
# ACK 与下发已启用(代码已支持)
CHAT_DOWNLINK_ENABLE=true
ACK_ENABLE=true
ACK_TOPIC_PATTERN=device/+/ack
```
说明:
- EMQX Webhook 需携带 `x-webhook-token: <同上>` 以通过网关校验。
- 下发主题约定:`device/{userId}/down`ACK 主题:`device/{userId}/ack`
## EMQX 与网关的功能对接
我们当前的主题规范:
- 客户端上行(发消息):`chat/send/{conversationId}`EMQX → Webhook → 网关 → Supabase
- 网关回送确认(可选 echo`chat/recv/{conversationId}`(网关 → MQTT
- 设备下发:`device/{userId}/down`(网关 → MQTT
- 设备 ACK`device/{userId}/ack`(设备 → MQTT → 网关订阅标记 ack
### 方案 AEMQX 5.x 推荐):规则引擎 + HTTP 数据桥Webhook
1) 创建 HTTP 服务器资源Data Bridge → HTTP Server
- 目标 URL: `http://119.146.131.237:9560/webhooks/mqtt`
- Header: `x-webhook-token: <与网关 .env 一致>`
- Payload 编码:开启 Base64以兼容任意负载
2) 创建规则Rules
- 规则 SQL仅转发我们关心的主题
```sql
SELECT
payload as payload,
topic as topic,
clientid as clientid,
username as username,
timestamp as ts
FROM
"message.publish"
WHERE
topic LIKE 'chat/send/%'
```
- 选择上一步的 HTTP 资源作为动作Action
- 在动作的请求体中,使用 EMQX 模板自定义为:
```json
{
"event": "message.publish",
"topic": "${topic}",
"clientid": "${clientid}",
"username": "${username}",
"payload_base64": "${payload}"
}
```
- 保存并启用规则。
规则生效后,任意客户端向 `chat/send/xxx` 发布消息EMQX 会将事件通过 HTTP POST 投递到网关;网关会校验 `x-webhook-token`,解析 payload写入 Supabase。
### 方案 BEMQX 4.x 可用):启用 WebHook 插件
在 `emqx.conf` / `plugins` 中启用 `emqx_web_hook`,并配置:
```
web.hook.api.url = http://119.146.131.237:9560/webhooks/mqtt
web.hook.encode_payload = base64
web.hook.headers = x-webhook-token=<与网关 .env 一致>
# 只打开需要的事件message.publish 即可)
web.hook.rule.message.publish.1 = {"action": "on_message_publish"}
```
重启生效后,与方案 A 类似生效路径。
## 认证与 ACL 建议
最小可用(开发):可先允许匿名或统一账号连接,靠网关与数据库 RLS 做细粒度校验。生产建议:
1) 认证(任选其一)
- JWT 认证:使用 EMQX JWT Auth设置 HS256 密钥为 Supabase 项目的 JWT Secret使客户端将 Supabase 的 JWT 作为 password或 username提交由 EMQX 验签。
- 用户名/密码:内置用户数据库,按用户维度下发 ACL。
- MySQL/HTTP 认证:若必须使用 3306 的 MySQL 为后端,建议仅内网放通 3306并在 EMQX 配 MySQL 认证与 ACL 表。
2) ACL示例规则
- 普通客户端(以用户名等于 userId 为例):
- 允许 Publish 到 `chat/send/#`
- 允许 Subscribe `chat/recv/#`
- 允许 Subscribe `device/%u/down`%u 为用户名)
- 允许 Publish `device/%u/ack`
- 网关客户端clientid= gateway-* 或特定用户名):
- 允许 Subscribe `chat/send/#`、`device/+/ack`
- 允许 Publish `chat/recv/#`、`device/+/down`
EMQX 5.x 可在 Authorization 中添加 Built-in Database 规则EMQX 4.x 可用 `emqx_authz.conf`
```
{allow, {user, "<gateway-user>"}, subscribe, ["chat/send/#", "device/+/ack"]}.
{allow, {user, "<gateway-user>"}, publish, ["chat/recv/#", "device/+/down"]}.
{allow, all, publish, ["chat/send/#", "device/%u/ack"]}.
{allow, all, subscribe, ["chat/recv/#", "device/%u/down"]}.
{deny, all, all, ["#"]}.
```
> 说明:严格的会话参与者校验在网关/数据库侧完成EMQX 侧 ACL 采用“粗粒度放行、最小必要”的方式,降低规则复杂度。
## 客户端接入地址
- MQTT/TCP`mqtt://119.146.131.237:8105`
- WebSocket (WS)`ws://119.146.131.237:8083/mqtt`
- WebSocket (WSS)`wss://119.146.131.237:8084/mqtt`(需部署证书)
- Dashboard`http://119.146.131.237:8104`(建议加 IP 白名单或 VPN
## 验证流程(端到端)
1) 启动网关(确保 .env 已正确配置 MQTT_URL/WEBHOOK_TOKEN 等):
- 在 `server/gateway-mqtt-node` 目录启动:`npm run dev`
2) 验证上行(客户端 → EMQX → Webhook → 网关 → DB
- 用任意 MQTT 客户端向 `chat/send/<conversationId>` 发布 JSON 负载,例如:
```json
{"sender_id":"<userId>","content":"hello","content_type":"text"}
```
- 观察网关日志应有 persist ok并在 Supabase 的 chat_messages 有记录。
3) 验证下发DB → 网关 → EMQX → 设备)
- 运行仓库中的脚本插入下发表:
- `npm run simulate:chat:downlink`(需设置 SIM_CHAT_CONVERSATION_ID / SIM_TARGET_USER_ID
- 设备(或 MQTT 客户端)订阅 `device/<userId>/down` 应收到消息。
4) 验证 ACK设备 → EMQX → 网关 → DB 标记)
- 设备向 `device/<userId>/ack` 发布包含 `correlation_id` 的 JSON或使用脚本
- `npm run simulate:ack`(设置 SIM_ACK_TARGET / SIM_CORRELATION_ID
- 网关日志显示 `ack applied`,数据库下发记录状态变为 `acked`。
## 安全与运维建议
- 关闭不必要的外网端口:如非必须,请勿对公网开放 3306Dashboard 18083 建议仅内网或加白名单。
- 配置 TLS为 8084/WSS及可选 8883/MQTTS部署证书强制 Web 客户端走 WSS。
- Webhook 验证:务必设置强随机 `WEBHOOK_TOKEN` 并在 EMQX 配相同 Header。
- 账号与 ACL生产环境务必启用认证JWT/用户名密码/HTTP/MySQL与最小权限 ACL网关使用独立账户。
- 监控:可将 EMQX 指标Prometheus与网关心跳gateway_heartbeats接入统一监控。
---
如需,我可以根据实际 EMQX 版本4.x/5.x提供对应的导出/导入 JSON 或 CLI 命令,或远程协助完成配置。

View File

@@ -0,0 +1,135 @@
-- Harden chat user relations: guarantee a primary key on public.ak_users(id)
-- and make the chat_* foreign keys validate against it. Intended for
-- Supabase/PostgreSQL and safe to re-run.
--
-- Steps:
--   1. Add a primary key on public.ak_users(id) if it is missing.
--   2. Backfill missing ak_users rows from auth.users so foreign keys can validate.
--   3. Recreate chat_* foreign keys that point to ak_users.
begin;

-- 1) Refuse to proceed when ids are duplicated or NULL: the primary-key
--    creation below would fail anyway, so surface a clear error first.
DO
$$
DECLARE
    dup_count  integer;
    null_count integer;
BEGIN
    SELECT count(*) INTO dup_count
    FROM (
        SELECT id
        FROM public.ak_users
        GROUP BY id
        HAVING count(*) > 1
    ) AS duplicated;

    SELECT count(*) INTO null_count
    FROM public.ak_users
    WHERE id IS NULL;

    IF dup_count > 0 THEN
        RAISE EXCEPTION 'Cannot add primary key on public.ak_users(id): % duplicate id(s) detected', dup_count;
    ELSIF null_count > 0 THEN
        RAISE EXCEPTION 'Cannot add primary key on public.ak_users(id): % row(s) have NULL id', null_count;
    END IF;

    -- Create the constraint only when absent, so re-running is harmless.
    IF NOT EXISTS (
        SELECT 1
        FROM pg_constraint
        WHERE conname = 'ak_users_pkey'
          AND conrelid = 'public.ak_users'::regclass
    ) THEN
        ALTER TABLE public.ak_users
            ADD CONSTRAINT ak_users_pkey PRIMARY KEY (id);
    END IF;
END;
$$;
-- 2) Backfill basic ak_users profiles from auth.users for any missing ids.
--    Username/email fall back to derived placeholders so NOT NULL columns are
--    always satisfied; password_hash is a sentinel because auth.users owns the
--    real credential. NOT EXISTS (instead of a LEFT JOIN anti-join) states the
--    intent directly, and ON CONFLICT DO NOTHING makes the statement safe
--    against concurrent signups and repeated runs.
INSERT INTO public.ak_users (
  id,
  username,
  email,
  password_hash,
  gender,
  birthday,
  height_cm,
  weight_kg,
  bio,
  phone,
  avatar_url,
  region_id,
  school_id,
  grade_id,
  class_id,
  role,
  created_at,
  updated_at,
  auth_id,
  preferred_language
)
SELECT
  au.id,
  -- Prefer explicit profile names; fall back to the email local part.
  COALESCE(NULLIF(trim(au.raw_user_meta_data ->> 'nickname'), ''),
           NULLIF(trim(au.raw_user_meta_data ->> 'username'), ''),
           split_part(au.email, '@', 1)),
  COALESCE(au.email, 'user_' || substr(au.id::text, 1, 8) || '@akmon.local'),
  'auth-managed:' || au.id::text, -- sentinel: credential lives in auth.users
  COALESCE(NULLIF(trim(au.raw_user_meta_data ->> 'gender'), ''), 'other'),
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
  NULLIF(trim(au.raw_user_meta_data ->> 'avatar_url'), ''),
  NULL,
  NULL,
  NULL,
  NULL,
  COALESCE(NULLIF(trim(au.raw_user_meta_data ->> 'role'), ''), 'student'),
  COALESCE(au.created_at, now()),
  COALESCE(au.updated_at, now()),
  au.id,
  NULLIF(trim(au.raw_user_meta_data ->> 'preferred_language'), '')
FROM auth.users au
WHERE NOT EXISTS (
  SELECT 1
  FROM public.ak_users ak
  WHERE ak.id = au.id
)
ON CONFLICT DO NOTHING; -- skip rows that trip any unique constraint mid-run
-- 3) Recreate chat foreign keys to reference ak_users.
--    Each constraint is dropped first so the script stays idempotent.
--    Delete behavior: SET NULL where the row remains useful without the user,
--    CASCADE where the row belongs to the user.
ALTER TABLE public.chat_conversations
DROP CONSTRAINT IF EXISTS chat_conversations_owner_id_fkey;
-- Conversations survive owner deletion; owner_id becomes NULL.
ALTER TABLE public.chat_conversations
ADD CONSTRAINT chat_conversations_owner_id_fkey
FOREIGN KEY (owner_id) REFERENCES public.ak_users(id) ON DELETE SET NULL;
ALTER TABLE public.chat_participants
DROP CONSTRAINT IF EXISTS chat_participants_user_id_fkey;
-- Membership rows are removed together with the user.
ALTER TABLE public.chat_participants
ADD CONSTRAINT chat_participants_user_id_fkey
FOREIGN KEY (user_id) REFERENCES public.ak_users(id) ON DELETE CASCADE;
ALTER TABLE public.chat_messages
DROP CONSTRAINT IF EXISTS chat_messages_sender_id_fkey;
-- NOTE(review): deleting a user also deletes every message they sent;
-- confirm CASCADE is intended here rather than ON DELETE SET NULL.
ALTER TABLE public.chat_messages
ADD CONSTRAINT chat_messages_sender_id_fkey
FOREIGN KEY (sender_id) REFERENCES public.ak_users(id) ON DELETE CASCADE;
ALTER TABLE public.chat_notifications
DROP CONSTRAINT IF EXISTS chat_notifications_user_id_fkey;
ALTER TABLE public.chat_notifications
ADD CONSTRAINT chat_notifications_user_id_fkey
FOREIGN KEY (user_id) REFERENCES public.ak_users(id) ON DELETE CASCADE;
ALTER TABLE public.chat_mqtt_downlinks
DROP CONSTRAINT IF EXISTS chat_mqtt_downlinks_target_user_id_fkey;
-- Downlink history is kept even when the target user disappears.
ALTER TABLE public.chat_mqtt_downlinks
ADD CONSTRAINT chat_mqtt_downlinks_target_user_id_fkey
FOREIGN KEY (target_user_id) REFERENCES public.ak_users(id) ON DELETE SET NULL;
ALTER TABLE public.chat_mqtt_downlinks
DROP CONSTRAINT IF EXISTS chat_mqtt_downlinks_created_by_fkey;
ALTER TABLE public.chat_mqtt_downlinks
ADD CONSTRAINT chat_mqtt_downlinks_created_by_fkey
FOREIGN KEY (created_by) REFERENCES public.ak_users(id) ON DELETE CASCADE;
commit;

View File

@@ -0,0 +1,60 @@
# 大规模健康时序数据的存储选型Supabase vs ClickHouse vs InfluxDB 3
结论先行原始高频健康数据MQTT/蓝牙上报,写多读少、长期保留、需大跨度聚合)不宜长期堆在 Supabase(Postgres)。推荐采用“混合架构”:
- Supabase承载关系/权限/最近快照/应用侧事件与实时订阅(聊天与通知等)。
- ClickHouse 或 InfluxDB 3承载海量原始时序与聚合视图负责低成本高吞吐写入与大跨度查询。
## 1) 何时用谁(快速决策)
- 优先 ClickHouse需要通用 SQL、多维过滤、复杂聚合TopN、百分位/直方图、OLAP 报表与成本可控的长保留。
- 可选 InfluxDB 3数据严格是“时序度量 + 标签”的简洁模型,关注 Retention/Downsampling客户端/生态偏向时序栈Telegraf 等)。
- Supabase仅存最近快照、小体量派生指标、索引映射用户/设备/权限/数据入口)。
## 2) 推荐架构(混合)
MQTT/BLE → 网关(鉴权/幂等) → Kafka(ts.health) →
- Sink → ClickHouse（或 InfluxDB 3），写入明细表 + 物化/降采样视图
- 回写 → Supabase（最新快照、告警、索引）
前端:
- 趋势/历史:查询 ClickHouse/Influx
- 业务/权限/会话:查询 Supabase
- 实时:Supabase Realtime
## 3) ClickHouse 设计要点(示例)
- 明细表MergeTree月分区 + 主键顺序 + TTL
```
CREATE TABLE health_raw (
user_id UUID,
device_id UUID,
metric LowCardinality(String),
ts DateTime64(3, 'UTC'),
value Float64,
meta JSON,
ingest_dt Date DEFAULT today()
) ENGINE = MergeTree
PARTITION BY toYYYYMM(ts)
PRIMARY KEY (user_id, metric, ts)
ORDER BY (user_id, metric, ts)
TTL ts + INTERVAL 365 DAY DELETE;
```
- 物化视图:分钟/小时聚合用于长跨度查询Kafka 引入可用 Kafka Engine 或外部 SinkTelegraf/Vector/Fluent Bit/Connectors
## 4) InfluxDB 3 要点
- measurement+tags+fields+timestampRetention & Downsampling 用 bucket/任务;
- 接入Telegraf mqtt_consumer → influxdb_v3 输出最省事;亦可 Kafka → Telegraf/自写消费者。
## 5) 与 Supabase 集成
- 保留关系/权限/会话/通知;
-`user_metrics_last` 快照表metric、last_value、last_ts、link用于快速渲染与跳转到时序库查询
- 后端 API 聚合:统一鉴权(基于 Supabase 用户),内部代查 ClickHouse/Influx 并合并结果。
## 6) 成本与运维
- ClickHouse 压缩/列存长保留更省InfluxDB 3 简化时序运维Supabase 避免明细爆表降成本;
- 监控写入延迟、失败率、分区大小、查询耗时、Kafka lag、告警阈值。
## 7) 落地步骤
1. 保持现有 Supabase 聊天/通知;
2. 按上面 schema 搭一套 ClickHouse或 InfluxDB 3与 1m/1h 物化/降采样;
3. 网关产出 Kafka ts.health 并接 Sink
4. 前端曲线改走时序库(或由后端代理);
5. 上线监控与告警、成本评估与冷热分层。
小结:原始大规模健康时序数据 → ClickHouse/InfluxDB 3Supabase 负责“关系 + 实时 + 快照索引”,两者配合既稳又省。

179
doc_chat/locations.sql Normal file
View File

@@ -0,0 +1,179 @@
-- Location tracking schema for 2.4G gateway based wearable positioning
-- Run this script in Supabase/PostgreSQL environment after ensuring uuid-ossp extension is available.
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- NOTE(review): pgcrypto is not used in this file's visible statements —
-- presumably kept for gen_random_uuid() callers elsewhere; confirm before removing.
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-- =============================================================
-- 1. 2.4G Gateways registry
-- =============================================================
-- One row per physical gateway unit, including where it is installed,
-- its radio coverage hints, network identity, and lifecycle status.
CREATE TABLE IF NOT EXISTS location_gateways (
  id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
  organization_id UUID, -- optional, reference to owning org / school (wire manually)
  campus_code TEXT, -- logical campus/region grouping code
  area_name TEXT, -- eg. East Campus, Zone B
  building_name TEXT,
  floor_label TEXT,
  room_label TEXT,
  name TEXT NOT NULL, -- human friendly name
  system_code TEXT NOT NULL UNIQUE, -- system/asset identifier
  serial_number TEXT,
  description TEXT,
  latitude DECIMAL(10, 8) NOT NULL,
  longitude DECIMAL(11, 8) NOT NULL,
  altitude_m NUMERIC(8, 2),
  coordinate_system TEXT DEFAULT 'WGS84', -- WGS84 / GCJ02 / BD09 etc
  install_height_m NUMERIC(6, 2),
  orientation_deg NUMERIC(5, 2),
  coverage_radius_m NUMERIC(6, 2),
  coverage_notes TEXT,
  ip_address INET,
  lan_mac MACADDR,
  upstream_mac MACADDR,
  firmware_version TEXT,
  hardware_version TEXT,
  heartbeat_interval_s INTEGER,
  -- Lifecycle state; CHECK keeps the value set closed.
  status TEXT DEFAULT 'active' CHECK (status IN ('active', 'maintenance', 'inactive', 'retired')),
  last_online_at TIMESTAMPTZ,
  last_maintenance_at TIMESTAMPTZ,
  tags TEXT[],
  extra JSONB,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  deleted_at TIMESTAMPTZ -- soft-delete marker; presumably consumers filter on it — confirm
);
COMMENT ON TABLE location_gateways IS 'Registry of 2.4G gateways installed across campuses.';
COMMENT ON COLUMN location_gateways.extra IS 'Extensible JSON payload for vendor-specific metadata.';
CREATE INDEX IF NOT EXISTS idx_location_gateways_org ON location_gateways(organization_id);
CREATE INDEX IF NOT EXISTS idx_location_gateways_geo ON location_gateways(latitude, longitude);
CREATE INDEX IF NOT EXISTS idx_location_gateways_status ON location_gateways(status);
-- Auto update trigger for updated_at
-- Stamps updated_at on every UPDATE so callers never have to set it manually.
CREATE OR REPLACE FUNCTION trg_location_gateways_touch() RETURNS TRIGGER AS $$
BEGIN
  NEW.updated_at := NOW();
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_location_gateways_touch ON location_gateways;
CREATE TRIGGER trg_location_gateways_touch
  BEFORE UPDATE ON location_gateways
  FOR EACH ROW EXECUTE FUNCTION trg_location_gateways_touch();
-- =============================================================
-- 2. Wearable location snapshots (per time slice, per device)
-- =============================================================
-- One row per computed position estimate for a wearable device.
-- The declared value ranges (confidence 0.00-1.00, WGS84 lat/long) are now
-- enforced with named CHECK constraints instead of comments only. Note: with
-- CREATE TABLE IF NOT EXISTS the checks apply to fresh deployments; existing
-- tables keep their current definition.
CREATE TABLE IF NOT EXISTS wearable_location_snapshots (
  id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
  device_id UUID NOT NULL REFERENCES public.ak_devices(id) ON DELETE CASCADE,
  user_id UUID REFERENCES public.ak_users(id) ON DELETE SET NULL,
  estimated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  window_started_at TIMESTAMPTZ,
  window_ended_at TIMESTAMPTZ,
  latitude DECIMAL(10, 8)
    CONSTRAINT wearable_snapshots_latitude_check CHECK (latitude BETWEEN -90 AND 90),
  longitude DECIMAL(11, 8)
    CONSTRAINT wearable_snapshots_longitude_check CHECK (longitude BETWEEN -180 AND 180),
  altitude_m NUMERIC(8, 2),
  coordinate_system TEXT DEFAULT 'WGS84',
  uncertainty_radius_m NUMERIC(6, 2), -- estimated accuracy / radius (meters)
  confidence NUMERIC(4, 2) -- 0.00 - 1.00
    CONSTRAINT wearable_snapshots_confidence_check CHECK (confidence BETWEEN 0 AND 1),
  method TEXT, -- e.g. WCL, Trilateration, Kalman
  algorithm_version TEXT,
  observation_count INTEGER DEFAULT 0,
  campus_code TEXT,
  area_name TEXT,
  building_name TEXT,
  floor_label TEXT,
  room_label TEXT,
  observations JSONB, -- optional denormalized structure of gateway RSSI readings
  diagnostics JSONB, -- troubleshooting info (residuals, covariance etc)
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
COMMENT ON TABLE wearable_location_snapshots IS 'Estimated wearable locations derived from multi-gateway RSSI aggregation.';
COMMENT ON COLUMN wearable_location_snapshots.observations IS 'Optional JSON containing gateway RSSI array for quick lookup.';
CREATE INDEX IF NOT EXISTS idx_wearable_snapshots_device_time ON wearable_location_snapshots(device_id, estimated_at DESC);
CREATE INDEX IF NOT EXISTS idx_wearable_snapshots_user_time ON wearable_location_snapshots(user_id, estimated_at DESC);
CREATE INDEX IF NOT EXISTS idx_wearable_snapshots_geo ON wearable_location_snapshots(latitude, longitude);
-- Stamps updated_at on every UPDATE, mirroring the gateways-table trigger.
CREATE OR REPLACE FUNCTION trg_wearable_snapshots_touch() RETURNS TRIGGER AS $$
BEGIN
  NEW.updated_at := NOW();
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_wearable_snapshots_touch ON wearable_location_snapshots;
CREATE TRIGGER trg_wearable_snapshots_touch
  BEFORE UPDATE ON wearable_location_snapshots
  FOR EACH ROW EXECUTE FUNCTION trg_wearable_snapshots_touch();
-- =============================================================
-- 3. Raw gateway RSSI observations per snapshot
-- =============================================================
-- Raw per-gateway readings that fed each snapshot's position estimate.
CREATE TABLE IF NOT EXISTS wearable_location_observations (
  id BIGSERIAL PRIMARY KEY,
  snapshot_id UUID NOT NULL REFERENCES wearable_location_snapshots(id) ON DELETE CASCADE,
  gateway_id UUID NOT NULL REFERENCES location_gateways(id) ON DELETE CASCADE,
  received_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  gateway_time TIMESTAMPTZ,
  rssi_dbm NUMERIC(5, 2) NOT NULL,
  snr_db NUMERIC(5, 2),
  distance_m NUMERIC(7, 2), -- computed from RSSI if available
  channel INTEGER,
  frequency_mhz NUMERIC(6, 2),
  packet_sequence INTEGER,
  latency_ms INTEGER,
  firmware_version TEXT,
  payload JSONB,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
COMMENT ON TABLE wearable_location_observations IS 'Raw 2.4G gateway readings contributing to a wearable location snapshot.';
-- At most one reading per (snapshot, gateway) pair.
CREATE UNIQUE INDEX IF NOT EXISTS idx_observations_unique_sample
  ON wearable_location_observations(snapshot_id, gateway_id);
CREATE INDEX IF NOT EXISTS idx_observations_gateway_time
  ON wearable_location_observations(gateway_id, received_at DESC);
CREATE INDEX IF NOT EXISTS idx_observations_snapshot ON wearable_location_observations(snapshot_id);
-- Optional view for latest snapshot per device
-- DISTINCT ON with ORDER BY (device_id, estimated_at DESC) keeps exactly the
-- newest snapshot row for each device.
CREATE OR REPLACE VIEW wearable_latest_location AS
SELECT DISTINCT ON (device_id)
  device_id,
  id AS snapshot_id,
  estimated_at,
  latitude,
  longitude,
  altitude_m,
  uncertainty_radius_m,
  confidence,
  method,
  algorithm_version,
  created_at
FROM wearable_location_snapshots
ORDER BY device_id, estimated_at DESC;
COMMENT ON VIEW wearable_latest_location IS 'Convenience view returning the latest computed position per wearable device.';

141
doc_chat/mock_chat.sql Normal file
View File

@@ -0,0 +1,141 @@
-- Mock chat data for local development and demos
-- Uses deterministic UUIDs so the dataset can be reapplied safely
-- Assumes chat schema from doc_chat/create_chat_tables.sql is already deployed
-- Primary tester account (parent side): eed3824b-bba1-4309-8048-19d17367c084
-- Homeroom teacher account: e95b36ab-6cc8-4b69-8d23-286f98fe79ce
-- Demo student account: 0122a2e6-c412-4e9a-9ad5-403e0048333c
begin;
-- Optional cleanup so script is idempotent.
-- Child rows are removed BEFORE chat_conversations so that any non-cascading
-- foreign key (e.g. chat_mqtt_downlinks.conversation_id) cannot block the
-- parent delete; the original order deleted conversations first.
delete from chat_notifications where conversation_id = any(array[
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101'::uuid,
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202'::uuid
]);
delete from chat_messages where conversation_id = any(array[
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101'::uuid,
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202'::uuid
]);
delete from chat_participants where conversation_id = any(array[
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101'::uuid,
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202'::uuid
]);
delete from chat_mqtt_downlinks where conversation_id = any(array[
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101'::uuid,
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202'::uuid
]);
delete from chat_conversations where id = any(array[
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101'::uuid,
  '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202'::uuid
]);
-- Heartbeats reference the gateway node, so they go first.
delete from chat_gateway_heartbeats where gateway_id = '7f8e2e9c-62c3-4fb5-9a4f-18aa0a118001';
delete from chat_gateway_nodes where id = '7f8e2e9c-62c3-4fb5-9a4f-18aa0a118001';
-- Conversations
-- First row: 1-on-1 support thread (title null, is_group false); second row: group chat.
insert into chat_conversations (id, title, is_group, owner_id, last_message_at, metadata, created_at, updated_at)
values
('0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', null, false, 'e95b36ab-6cc8-4b69-8d23-286f98fe79ce',
 '2025-09-27 09:05:00+08'::timestamptz, '{"context":"direct_support"}'::jsonb,
 '2025-09-26 08:55:00+08'::timestamptz, '2025-09-27 09:05:00+08'::timestamptz),
('0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '体能训练筹备群', true, '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004',
 '2025-09-28 08:10:45+08'::timestamptz, '{"sport":"basketball","tags":["training","weekly"]}'::jsonb,
 '2025-09-26 12:10:00+08'::timestamptz, '2025-09-28 08:10:45+08'::timestamptz);
-- Participants
-- Covers all three roles (owner / admin / member) across both conversations.
insert into chat_participants (id, conversation_id, user_id, role, joined_at, settings)
values
('8a111111-2222-4333-8444-555555555501', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', 'e95b36ab-6cc8-4b69-8d23-286f98fe79ce', 'owner',
 '2025-09-26 08:55:00+08'::timestamptz, '{"notifications":"all"}'::jsonb),
('8a111111-2222-4333-8444-555555555502', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', 'eed3824b-bba1-4309-8048-19d17367c084', 'member',
 '2025-09-26 08:55:30+08'::timestamptz, '{"notifications":"mentions"}'::jsonb),
('8a111111-2222-4333-8444-555555555503', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004', 'owner',
 '2025-09-26 12:10:00+08'::timestamptz, '{"pin":true}'::jsonb),
('8a111111-2222-4333-8444-555555555504', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '0122a2e6-c412-4e9a-9ad5-403e0048333c', 'member',
 '2025-09-26 12:10:30+08'::timestamptz, '{"color":"teal"}'::jsonb),
('8a111111-2222-4333-8444-555555555505', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', 'eed3824b-bba1-4309-8048-19d17367c084', 'admin',
 '2025-09-26 12:11:00+08'::timestamptz, '{"notifications":"digest"}'::jsonb);
-- Messages (chronological order per conversation)
-- Exercises text/markdown/audio content types and manual/bluetooth/school_gateway
-- ingress paths, including reply_to threading between rows.
insert into chat_messages (id, conversation_id, sender_id, content, content_type, ingress_type, reply_to, metadata, created_at, updated_at)
values
('6c70f6a3-87f0-4685-b83f-99180a220101', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', 'e95b36ab-6cc8-4b69-8d23-286f98fe79ce',
 '早上好 Bob下午的体能课记得提前 10 分钟到场。', 'text', 'manual', null, '{"tone":"reminder"}'::jsonb,
 '2025-09-27 09:02:00+08'::timestamptz, '2025-09-27 09:02:00+08'::timestamptz),
('6c70f6a3-87f0-4685-b83f-99180a220102', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', 'eed3824b-bba1-4309-8048-19d17367c084',
 '收到!我会提前去热身,还需要带什么器材吗?', 'text', 'manual', '6c70f6a3-87f0-4685-b83f-99180a220101', null,
 '2025-09-27 09:03:00+08'::timestamptz, '2025-09-27 09:03:00+08'::timestamptz),
('6c70f6a3-87f0-4685-b83f-99180a220103', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', 'e95b36ab-6cc8-4b69-8d23-286f98fe79ce',
 '自备护腕就行,训练结束后我们复盘数据。', 'text', 'manual', '6c70f6a3-87f0-4685-b83f-99180a220102', '{"follow_up":true}'::jsonb,
 '2025-09-27 09:05:00+08'::timestamptz, '2025-09-27 09:05:00+08'::timestamptz),
('6c70f6a3-87f0-4685-b83f-99180a220201', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004',
 '本周训练安排:周二体能、周四对抗、周六拉伸,附件里有详细指标。', 'markdown', 'manual', null,
 '{"attachments":[{"type":"sheet","name":"week39-plan.xlsx"}]}'::jsonb,
 '2025-09-28 07:30:00+08'::timestamptz, '2025-09-28 07:30:00+08'::timestamptz),
('6c70f6a3-87f0-4685-b83f-99180a220202', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '0122a2e6-c412-4e9a-9ad5-403e0048333c',
 '收到训练表,周二我带上心率带。', 'text', 'manual', '6c70f6a3-87f0-4685-b83f-99180a220201', '{"reaction":"thumbs_up"}'::jsonb,
 '2025-09-28 08:00:00+08'::timestamptz, '2025-09-28 08:00:00+08'::timestamptz),
('6c70f6a3-87f0-4685-b83f-99180a220203', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '0122a2e6-c412-4e9a-9ad5-403e0048333c',
 'http://ak3.oulog.com:8000/storage/v1/object/public/chat-audio/voices/bluetooth/2025/09/28/voice-bt-080512.ogg', 'audio', 'bluetooth', null,
 '{"storage_bucket":"chat-audio","object_path":"voices/bluetooth/2025/09/28/voice-bt-080512.ogg","public_url":"http://ak3.oulog.com:8000/storage/v1/object/public/chat-audio/voices/bluetooth/2025/09/28/voice-bt-080512.ogg","duration_sec":32,"codec":"opus"}'::jsonb,
 '2025-09-28 08:05:12+08'::timestamptz, '2025-09-28 08:05:12+08'::timestamptz),
('6c70f6a3-87f0-4685-b83f-99180a220204', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '0122a2e6-c412-4e9a-9ad5-403e0048333c',
 'http://ak3.oulog.com:8000/storage/v1/object/public/chat-audio/voices/gateway/2025/09/28/voice-gw-081045.ogg', 'audio', 'school_gateway', null,
 '{"gateway_id":"chat-gw-sports-a","storage_bucket":"chat-audio","object_path":"voices/gateway/2025/09/28/voice-gw-081045.ogg","public_url":"http://ak3.oulog.com:8000/storage/v1/object/public/chat-audio/voices/gateway/2025/09/28/voice-gw-081045.ogg","duration_sec":27,"codec":"aac"}'::jsonb,
 '2025-09-28 08:10:45+08'::timestamptz, '2025-09-28 08:10:45+08'::timestamptz);
-- Notifications (unread + read samples)
-- One row per recipient per message; is_read mixes read and unread states
-- so unread-badge queries have data to work with.
insert into chat_notifications (id, user_id, conversation_id, message_id, type, is_read, created_at)
values
('4f3a0a10-1b84-4f7e-8ec3-111100000001', 'eed3824b-bba1-4309-8048-19d17367c084', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', '6c70f6a3-87f0-4685-b83f-99180a220101', 'message', true,
 '2025-09-27 09:02:05+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000002', 'e95b36ab-6cc8-4b69-8d23-286f98fe79ce', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', '6c70f6a3-87f0-4685-b83f-99180a220102', 'message', true,
 '2025-09-27 09:03:10+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000003', 'eed3824b-bba1-4309-8048-19d17367c084', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20101', '6c70f6a3-87f0-4685-b83f-99180a220103', 'message', false,
 '2025-09-27 09:05:05+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000004', '0122a2e6-c412-4e9a-9ad5-403e0048333c', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220201', 'message', false,
 '2025-09-28 07:30:15+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000005', 'eed3824b-bba1-4309-8048-19d17367c084', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220201', 'message', false,
 '2025-09-28 07:30:15+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000006', '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220202', 'message', true,
 '2025-09-28 08:00:10+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000007', '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220203', 'message', false,
 '2025-09-28 08:05:15+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000008', 'eed3824b-bba1-4309-8048-19d17367c084', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220203', 'message', false,
 '2025-09-28 08:05:15+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-111100000009', '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220204', 'message', false,
 '2025-09-28 08:10:50+08'::timestamptz),
('4f3a0a10-1b84-4f7e-8ec3-11110000000a', 'eed3824b-bba1-4309-8048-19d17367c084', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', '6c70f6a3-87f0-4685-b83f-99180a220204', 'message', false,
 '2025-09-28 08:10:50+08'::timestamptz);
-- Sample MQTT downlink tied to training reminder
-- Status 'pending' with scheduled_at/expires_at lets the gateway simulator pick it up.
insert into chat_mqtt_downlinks (id, conversation_id, target_user_id, topic, payload, payload_encoding, qos, retain, status,
 scheduled_at, expires_at, created_by, created_at, updated_at, metadata)
values
('3d8e9956-1f35-4d34-a1f4-8d061df30001', '0f2c8f30-9f0a-4a7b-8b75-9d41c4f20202', 'eed3824b-bba1-4309-8048-19d17367c084',
 'device/guardian-eed3824b/down', '{"type":"reminder","title":"周二体能课","start":"2025-09-30T14:00:00+08:00"}',
 'json', 1, false, 'pending',
 '2025-09-29 09:00:00+08'::timestamptz, '2025-09-30 12:00:00+08'::timestamptz,
 '05b40c9c-6b2d-4f1a-9d84-d1f4db1a0004', '2025-09-28 08:05:00+08'::timestamptz, '2025-09-28 08:05:00+08'::timestamptz,
 '{"priority":"high"}'::jsonb);
-- Gateway node + latest heartbeat for dashboard demos
insert into chat_gateway_nodes (id, name, mqtt_client_id, version, region, tags)
values
('7f8e2e9c-62c3-4fb5-9a4f-18aa0a118001', '体育馆边缘网关 A', 'chat-gw-sports-a', '1.4.2', 'cn-south', '{"rack":"gym-east","roles":["mqtt","kafka"]}'::jsonb);
insert into chat_gateway_heartbeats (id, gateway_id, created_at, uptime_sec, mem_rss_mb, heap_used_mb,
 mqtt_connected, kafka_connected, redis_connected,
 msgs_in, msgs_out, msgs_dropped, errors, acl_denied, kafka_produced, extra)
values
('9a3f5c12-2c6f-4b92-9e36-81f2f1010001', '7f8e2e9c-62c3-4fb5-9a4f-18aa0a118001',
 '2025-09-28 08:02:00+08'::timestamptz, 86400, 612, 248,
 true, true, true,
 1280, 1264, 3, 1, 0, 1248,
 '{"cpu_load":0.53,"latency_ms":42}'::jsonb);
commit;

View File

@@ -0,0 +1,12 @@
# Minimal Mosquitto config for dev/test
# Persist in-flight/retained messages across broker restarts.
persistence true
persistence_location /mosquitto/data/
# Log to stdout so `docker logs` / journal shows broker activity.
log_dest stdout
# allow anonymous for local dev; for prod, create password_file and set allow_anonymous false
allow_anonymous true
# listeners
# Plain MQTT on all interfaces, standard port.
listener 1883 0.0.0.0
protocol mqtt
# optional websocket listener
# listener 9001 0.0.0.0
# protocol websockets

80
doc_chat/rt.sql Normal file
View File

@@ -0,0 +1,80 @@
-- Read access to chat_messages under RLS.
-- Policies are dropped first so the script is idempotent on re-runs,
-- matching the drop-then-create convention used by the other policy scripts.
drop policy if exists "messages_select_sender" on public.chat_messages;
drop policy if exists "messages_select_members" on public.chat_messages;

-- allow senders to read their own messages
create policy "messages_select_sender"
on public.chat_messages
for select
using (sender_id = auth.uid());

-- allow other conversation members to read messages
create policy "messages_select_members"
on public.chat_messages
for select
using (
  exists (
    select 1
    from public.chat_participants p
    where p.conversation_id = chat_messages.conversation_id
      and p.user_id = auth.uid()
  )
);
-- ===============================
-- Full setup for realtime listening via Supabase Realtime Inspector
-- Make sure we have uuid helpers for primary keys
create extension if not exists "pgcrypto";
create table if not exists public.ak_test (
  id uuid primary key default gen_random_uuid(),
  -- Defaults to the caller's auth.uid(); NULL when inserted outside a user session.
  owner_id uuid default auth.uid(),
  payload jsonb default '{}'::jsonb,
  -- Stored in UTC regardless of the session time zone.
  created_at timestamptz not null default timezone('utc', now())
);
comment on table public.ak_test is 'Scratch table for realtime policy tests';
-- Register the table with the realtime publication (idempotent)
-- Guarded because ALTER PUBLICATION ... ADD TABLE errors if the table is
-- already in the publication.
do $$
begin
  if not exists (
    select 1
    from pg_publication_tables
    where pubname = 'supabase_realtime'
      and schemaname = 'public'
      and tablename = 'ak_test'
  ) then
    alter publication supabase_realtime add table public.ak_test;
  end if;
end $$;
-- ===============================
-- Reset helper: disable and recreate policies scoped to the `public` role
alter table public.ak_test disable row level security;
drop policy if exists "ak_test_insert_owner" on public.ak_test;
drop policy if exists "ak_test_select_all" on public.ak_test;
drop policy if exists "ak_test_select_owner" on public.ak_test;
alter table public.ak_test enable row level security;
-- Inserts must stamp the caller's uid as owner.
create policy "ak_test_insert_owner"
on public.ak_test
for insert
to public
with check (owner_id = auth.uid());
-- NOTE(review): this policy grants read access to every row, which makes
-- "ak_test_select_owner" below redundant while both exist — presumably
-- intentional for realtime policy experiments; confirm.
create policy "ak_test_select_all"
on public.ak_test
for select
to public
using (true);
create policy "ak_test_select_owner"
on public.ak_test
for select
to public
using (owner_id = auth.uid());
-- To remove test data quickly
-- NOTE(review): this truncate runs on EVERY execution of the script; move it
-- out of the file if test rows should survive re-runs.
truncate table public.ak_test;

View File

@@ -0,0 +1,530 @@
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
# Reset everything: ./reset.sh
name: supabase
services:
studio:
container_name: supabase-studio
image: supabase/studio:2025.06.30-sha-6f5982d
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"node",
"-e",
"fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
]
timeout: 10s
interval: 5s
retries: 3
depends_on:
analytics:
condition: service_healthy
environment:
STUDIO_PG_META_URL: http://meta:8080
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
OPENAI_API_KEY: ${OPENAI_API_KEY:-}
SUPABASE_URL: http://kong:8000
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
AUTH_JWT_SECRET: ${JWT_SECRET}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_URL: http://analytics:4000
NEXT_PUBLIC_ENABLE_LOGS: true
# Comment to use Big Query backend for analytics
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
# Uncomment to use Big Query backend for analytics
# NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
kong:
container_name: supabase-kong
image: kong:2.8.1
restart: unless-stopped
ports:
- ${KONG_HTTP_PORT}:8000/tcp
- ${KONG_HTTPS_PORT}:8443/tcp
volumes:
# https://github.com/supabase/supabase/issues/12661
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z
depends_on:
analytics:
condition: service_healthy
environment:
KONG_DATABASE: "off"
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
# https://github.com/supabase/cli/issues/14
KONG_DNS_ORDER: LAST,A,CNAME
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
# https://unix.stackexchange.com/a/294837
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
auth:
container_name: supabase-auth
image: supabase/gotrue:v2.177.0
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9999/health"
]
timeout: 5s
interval: 5s
retries: 3
depends_on:
db:
# Disable this if you are using an external Postgres database
condition: service_healthy
analytics:
condition: service_healthy
environment:
GOTRUE_API_HOST: 0.0.0.0
GOTRUE_API_PORT: 9999
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
GOTRUE_DB_DRIVER: postgres
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
GOTRUE_SITE_URL: ${SITE_URL}
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
GOTRUE_JWT_ADMIN_ROLES: service_role
GOTRUE_JWT_AUD: authenticated
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
GOTRUE_JWT_SECRET: ${JWT_SECRET}
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
# Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
# GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
GOTRUE_SMTP_HOST: ${SMTP_HOST}
GOTRUE_SMTP_PORT: ${SMTP_PORT}
GOTRUE_SMTP_USER: ${SMTP_USER}
GOTRUE_SMTP_PASS: ${SMTP_PASS}
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
# Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"
# GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
# GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
# GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
# GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
# GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
# GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
rest:
container_name: supabase-rest
image: postgrest/postgrest:v12.2.12
restart: unless-stopped
depends_on:
db:
# Disable this if you are using an external Postgres database
condition: service_healthy
analytics:
condition: service_healthy
environment:
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
PGRST_DB_ANON_ROLE: anon
PGRST_JWT_SECRET: ${JWT_SECRET}
PGRST_DB_USE_LEGACY_GUCS: "false"
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
command:
[
"postgrest"
]
realtime:
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
container_name: realtime-dev.supabase-realtime
image: supabase/realtime:v2.34.47
restart: unless-stopped
depends_on:
db:
# Disable this if you are using an external Postgres database
condition: service_healthy
analytics:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"curl",
"-sSfL",
"--head",
"-o",
"/dev/null",
"-H",
"Authorization: Bearer ${ANON_KEY}",
"http://localhost:4000/api/tenants/realtime-dev/health"
]
timeout: 5s
interval: 5s
retries: 3
environment:
PORT: 4000
DB_HOST: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_USER: supabase_admin
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_NAME: ${POSTGRES_DB}
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
DB_ENC_KEY: supabaserealtime
API_JWT_SECRET: ${JWT_SECRET}
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
ERL_AFLAGS: -proto_dist inet_tcp
DNS_NODES: "''"
RLIMIT_NOFILE: "10000"
APP_NAME: realtime
SEED_SELF_HOST: true
RUN_JANITOR: true
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
storage:
container_name: supabase-storage
image: supabase/storage-api:v1.25.7
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://storage:5000/status"
]
timeout: 5s
interval: 5s
retries: 3
depends_on:
db:
# Disable this if you are using an external Postgres database
condition: service_healthy
rest:
condition: service_started
imgproxy:
condition: service_started
environment:
ANON_KEY: ${ANON_KEY}
SERVICE_KEY: ${SERVICE_ROLE_KEY}
POSTGREST_URL: http://rest:3000
PGRST_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
FILE_SIZE_LIMIT: 52428800
STORAGE_BACKEND: file
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
TENANT_ID: stub
# TODO: https://github.com/supabase/storage-api/issues/55
REGION: stub
GLOBAL_S3_BUCKET: stub
ENABLE_IMAGE_TRANSFORMATION: "true"
IMGPROXY_URL: http://imgproxy:5001
imgproxy:
container_name: supabase-imgproxy
image: darthsim/imgproxy:v3.8.0
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test:
[
"CMD",
"imgproxy",
"health"
]
timeout: 5s
interval: 5s
retries: 3
environment:
IMGPROXY_BIND: ":5001"
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
IMGPROXY_USE_ETAG: "true"
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
meta:
container_name: supabase-meta
image: supabase/postgres-meta:v0.91.0
restart: unless-stopped
depends_on:
db:
# Disable this if you are using an external Postgres database
condition: service_healthy
analytics:
condition: service_healthy
environment:
PG_META_PORT: 8080
PG_META_DB_HOST: ${POSTGRES_HOST}
PG_META_DB_PORT: ${POSTGRES_PORT}
PG_META_DB_NAME: ${POSTGRES_DB}
PG_META_DB_USER: supabase_admin
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
functions:
container_name: supabase-edge-functions
image: supabase/edge-runtime:v1.69.6
restart: unless-stopped
volumes:
- ./volumes/functions:/home/deno/functions:Z
depends_on:
analytics:
condition: service_healthy
environment:
JWT_SECRET: ${JWT_SECRET}
SUPABASE_URL: http://kong:8000
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
command:
[
"start",
"--main-service",
"/home/deno/functions/main"
]
analytics:
container_name: supabase-analytics
image: supabase/logflare:1.14.2
restart: unless-stopped
ports:
- 4000:4000
# Uncomment to use Big Query backend for analytics
# volumes:
# - type: bind
# source: ${PWD}/gcloud.json
# target: /opt/app/rel/logflare/bin/gcloud.json
# read_only: true
healthcheck:
test:
[
"CMD",
"curl",
"http://localhost:4000/health"
]
timeout: 5s
interval: 5s
retries: 10
depends_on:
db:
# Disable this if you are using an external Postgres database
condition: service_healthy
environment:
LOGFLARE_NODE_HOST: 127.0.0.1
DB_USERNAME: supabase_admin
DB_DATABASE: _supabase
DB_HOSTNAME: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_SCHEMA: _analytics
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_SINGLE_TENANT: true
LOGFLARE_SUPABASE_MODE: true
LOGFLARE_MIN_CLUSTER_SIZE: 1
# Comment variables to use Big Query backend for analytics
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
POSTGRES_BACKEND_SCHEMA: _analytics
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
# Uncomment to use Big Query backend for analytics
# GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
# GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
# Comment out everything below this point if you are using an external Postgres database
db:
container_name: supabase-db
image: supabase/postgres:15.8.1.060
restart: unless-stopped
volumes:
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
# Must be superuser to create event trigger
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
# Must be superuser to alter reserved role
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
# Initialize the database settings with JWT_SECRET and JWT_EXP
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
# PGDATA directory is persisted between restarts
- ./volumes/db/data:/var/lib/postgresql/data:Z
# Changes required for internal supabase data such as _analytics
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
# Changes required for Analytics support
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
# Changes required for Pooler support
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
# Use named volume to persist pgsodium decryption key between restarts
- db-config:/etc/postgresql-custom
healthcheck:
test:
[
"CMD",
"pg_isready",
"-U",
"postgres",
"-h",
"localhost"
]
interval: 5s
timeout: 5s
retries: 10
depends_on:
vector:
condition: service_healthy
environment:
POSTGRES_HOST: /var/run/postgresql
PGPORT: ${POSTGRES_PORT}
POSTGRES_PORT: ${POSTGRES_PORT}
PGPASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PGDATABASE: ${POSTGRES_DB}
POSTGRES_DB: ${POSTGRES_DB}
JWT_SECRET: ${JWT_SECRET}
JWT_EXP: ${JWT_EXPIRY}
command:
[
"postgres",
"-c",
"config_file=/etc/postgresql/postgresql.conf",
"-c",
"log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
]
vector:
container_name: supabase-vector
image: timberio/vector:0.28.1-alpine
restart: unless-stopped
volumes:
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://vector:9001/health"
]
timeout: 5s
interval: 5s
retries: 3
environment:
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
command:
[
"--config",
"/etc/vector/vector.yml"
]
security_opt:
- "label=disable"
# Update the DATABASE_URL if you are using an external Postgres database
supavisor:
container_name: supabase-pooler
image: supabase/supavisor:2.5.7
restart: unless-stopped
ports:
- ${POSTGRES_PORT}:5432
- ${POOLER_PROXY_PORT_TRANSACTION}:6543
volumes:
- ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
healthcheck:
test:
[
"CMD",
"curl",
"-sSfL",
"--head",
"-o",
"/dev/null",
"http://127.0.0.1:4000/api/health"
]
interval: 10s
timeout: 5s
retries: 5
depends_on:
db:
condition: service_healthy
analytics:
condition: service_healthy
environment:
PORT: 4000
POSTGRES_PORT: ${POSTGRES_PORT}
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
CLUSTER_POSTGRES: true
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
VAULT_ENC_KEY: ${VAULT_ENC_KEY}
API_JWT_SECRET: ${JWT_SECRET}
METRICS_JWT_SECRET: ${JWT_SECRET}
REGION: local
ERL_AFLAGS: -proto_dist inet_tcp
POOLER_TENANT_ID: ${POOLER_TENANT_ID}
POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
POOLER_POOL_MODE: transaction
DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE}
command:
[
"/bin/sh",
"-c",
"/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
]
volumes:
db-config:

124
doc_chat/supa.env Normal file
View File

@@ -0,0 +1,124 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############
POSTGRES_PASSWORD=KiH57oIoDzNoU2y5PgucxNMifEb9GJmddNph2scYP1Hz0F3pC2eYgXYmpJ8oX
JWT_SECRET=IPFY0fjIeGpVyANpxPMg8kbKfHrRgXFYyklMSiPX509oO0cCLJegVuRuWuRIlNnL
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=54UBsyHn3uY8NVPc1bQ7
SECRET_KEY_BASE=c5S2IKYkr9xJYjUZApNfYul0heagTjEV2AyT/3O3sjSoXWKpgVVfTajJGNOGGm/F
VAULT_ENC_KEY=JSLDHnMmWoflrTetRIqqQjCiPpF6erLL
############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres
############
# Supavisor -- Database pooler
############
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=chat-local
POOLER_DB_POOL_SIZE=5
############
# API Proxy - Configuration for the Kong Reverse proxy.
############
KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443
############
# API - Configuration for PostgREST.
############
PGRST_DB_SCHEMAS=public,storage,graphql_public
############
# Auth - Configuration for the GoTrue authentication server.
############
## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000
## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false
## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
############
# Studio - Configuration for the Dashboard
############
STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project
STUDIO_PORT=3000
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000
# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true
# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=
############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false
############
# Logs - Configuration for Analytics
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############
# Change vector.toml sinks to reflect this change
# these cannot be the same value
LOGFLARE_PUBLIC_ACCESS_TOKEN=your-super-secret-and-long-logflare-key-public
LOGFLARE_PRIVATE_ACCESS_TOKEN=your-super-secret-and-long-logflare-key-private
# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER

218
doc_chat/位置.md Normal file
View File

@@ -0,0 +1,218 @@
明白了。你想把“2.4G 网关 + 手环 RSSI”估算出的位置信息接到地图里在 uni-app-xUTS Android上怎么做更稳当。下面给你一套实用建议和可直接复用的代码骨架包含精度圈、外部地图打开、国标坐标处理等
## 建议与实践路径
- 定位算法侧(从 RSSI 到坐标)
  - 用对数路径损耗模型把 RSSI 转成距离：d = 10^((RSSI0 − RSSI) / (10·n))；n 在室内一般取 2.2～3.0，需要场地标定得到 RSSI0（1 米参考距离处的 RSSI）和 n。
- 融合多网关:
    - 简单稳健：加权质心（WCL），权重用 1/d^p 或用 RSSI 线性/指数映射，抗多径，比三边定位更鲁棒。
- 条件好时:三边定位(最小二乘)+ RANSAC 剔除离群网关。
  - 平滑与抗抖动：3～5 点中值滤波 + 指数滑动平均，或一维/二维卡尔曼滤波。
- 场景约束将估算点“吸附”到建筑内合法区域、多边形map matching并给出不确定性半径例如距离方差/残差→半径)。
- 坐标系问题(中国大陆必须重视)
- 如果你算出的是 WGS84GPS 原生),大多数国内地图(高德/腾讯/微信小程序/原生 <map>)使用 GCJ-02火星坐标。展示前请转换为 GCJ-02。
- 反向地理编码(坐标→地址)可用高德/腾讯等服务,注意密钥与限频,不要在前端暴露密钥。
- uni-app-xUTS Android地图接入的三种方式
1) 快速方案uni.openLocation 打开系统/第三方地图
- 最快、稳定;不在应用内嵌地图,适合查看/导航跳转。
2) 应用内嵌地图:内置 `<map>` 组件
- App-Android 通常可用 markers/circles/polyline在 uni-app-x 某些版本上功能覆盖可能比标准 uni 稍有限,如遇不支持的属性可退化或走插件方案。
3) UTS 原生插件(高德/腾讯地图 SDK
- 需要更高级能力(离线地图、室内图、海量点渲染、轨迹回放、热力图),建议用官方原生 SDK 的 UTS 插件。去 uni 原生插件市场搜索“高德地图 UTS/定位 UTS”等。
- 工程与性能建议
- 权限Android 申请 ACCESS_FINE_LOCATION在 manifest.json 的 app-plus 节点配置权限。
- 绑定简化:模板里避免函数调用和复杂表达式,预先计算好 markers/circles 字段(已按 UTS 规范给出示例)。
- 批量更新:频繁刷新时做节流/合并更新,避免每条数据都 setData。
- 可视化:用 circle 显示“不确定性半径”;锚点颜色代表置信度;支持点击 marker 查看详情。
## 可复用代码片段
### 1) 坐标转换WGS84 → GCJ-02UTS 版)
```ts
// utils/coord.uts
export function wgs84ToGcj02(lat: number, lng: number): number[] {
  // Convert a WGS-84 coordinate to GCJ-02 ("Mars coordinates").
  // Points outside mainland China are returned unchanged — the GCJ-02
  // obfuscation is only applied inside China.
  if (outOfChina(lat, lng)) return [lat, lng]
  const dLat = transformLat(lng - 105.0, lat - 35.0)
  const dLng = transformLng(lng - 105.0, lat - 35.0)
  const radLat = lat / 180.0 * Math.PI
  let magic = Math.sin(radLat)
  magic = 1 - 0.00669342162296594323 * magic * magic
  const sqrtMagic = Math.sqrt(magic)
  // Canonical GCJ-02 deltas (a = 6378245.0, ee = 0.00669342162296594323,
  // a * (1 - ee) = 6335552.717000426). The latitude denominator is
  // a*(1-ee) / (magic * sqrtMagic) — the previous version multiplied by
  // `magic` instead of dividing, skewing the offset by a factor of magic^2.
  const dLat2 = (dLat * 180.0) / ((6335552.717000426 / (magic * sqrtMagic)) * Math.PI)
  const dLng2 = (dLng * 180.0) / ((6378245.0 / sqrtMagic * Math.cos(radLat)) * Math.PI)
  // Result is [latitude, longitude], matching the parameter order.
  return [lat + dLat2, lng + dLng2]
}
// True when the point lies outside the rough bounding box of mainland
// China; such points need no WGS-84 → GCJ-02 correction.
function outOfChina(lat: number, lng: number): boolean {
  const lngInside = lng >= 72.004 && lng <= 137.8347
  const latInside = lat >= 0.8293 && lat <= 55.8271
  return !(lngInside && latInside)
}
// Latitude perturbation term of the GCJ-02 obfuscation, evaluated at
// offsets x = lng - 105.0, y = lat - 35.0. The sinusoid groups are kept
// in the original summation order so the floating-point result is
// bit-identical to the reference implementation.
function transformLat(x: number, y: number): number {
  const pi = Math.PI
  const base = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * Math.sqrt(Math.abs(x))
  const highFreq = (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0
  const midFreq = (20.0 * Math.sin(y * pi) + 40.0 * Math.sin(y / 3.0 * pi)) * 2.0 / 3.0
  const lowFreq = (160.0 * Math.sin(y / 12.0 * pi) + 320 * Math.sin(y * pi / 30.0)) * 2.0 / 3.0
  return base + highFreq + midFreq + lowFreq
}
// Longitude perturbation term of the GCJ-02 obfuscation, evaluated at
// offsets x = lng - 105.0, y = lat - 35.0. Summation order mirrors the
// reference implementation to keep the floating-point result identical.
function transformLng(x: number, y: number): number {
  const pi = Math.PI
  const base = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * Math.sqrt(Math.abs(x))
  const highFreq = (20.0 * Math.sin(6.0 * x * pi) + 20.0 * Math.sin(2.0 * x * pi)) * 2.0 / 3.0
  const midFreq = (20.0 * Math.sin(x * pi) + 40.0 * Math.sin(x / 3.0 * pi)) * 2.0 / 3.0
  const lowFreq = (150.0 * Math.sin(x / 12.0 * pi) + 300.0 * Math.sin(x / 30.0 * pi)) * 2.0 / 3.0
  return base + highFreq + midFreq + lowFreq
}
```
使用建议:如果你算出来的是 WGS84就先 `wgs84ToGcj02` 再喂给地图组件/服务。
### 2) 最快路径外部地图打开openLocation
```vue
<!-- pages/location/open.uvue -->
<template>
<view class="page">
<button class="btn" @click="open">在地图中查看</button>
</view>
</template>
<script lang="uts">
export default {
data() { return { name: '手环估计位置', lat: 31.23037, lng: 121.4737 } },
methods: {
open() {
// 确保已转换到 GCJ-02如果在中国大陆
const name = this.name
const latitude = this.lat
const longitude = this.lng
uni.openLocation({ latitude, longitude, name } as any)
}
}
}
</script>
<style scoped>
.page{ padding: 24rpx }
.btn{ padding: 16rpx 24rpx; border:1px solid #ddd; border-radius: 8rpx }
</style>
```
- 优点:零集成、稳定、导航能力强。
- 缺点:不在应用内展示。
### 3) 嵌入式地图(内置 `<map>` 组件,展示 marker + 精度圈)
注意:不同 uni-app-x 版本对 `<map>` 的支持程度略有差异。若你的环境不支持该组件,请选择 openLocation 或 UTS 插件路线。
```vue
<!-- pages/location/map.uvue -->
<template>
<view class="page">
<map class="map"
:latitude="centerLat"
:longitude="centerLng"
:scale="18"
:markers="markers"
:circles="circles">
</map>
<view class="toolbar">
<button @click="recenter">回到中心</button>
<button @click="openSysMap">外部地图</button>
</view>
</view>
</template>
<script lang="uts">
import { wgs84ToGcj02 } from '@/utils/coord.uts'
type MapMarker = {
id: number; latitude: number; longitude: number;
iconPath: string; width: number; height: number; callout?: any
}
export default {
data() {
return {
centerLat: 31.23037,
centerLng: 121.4737,
markers: [] as Array<MapMarker>,
circles: [] as Array<any>,
// 原始估算WGS84
estLat: 31.23037,
estLng: 121.4737,
// 不确定性半径(米),可用 RSSI 残差/方差计算
radius: 20
}
},
onLoad() {
this.refreshDisplay()
},
methods: {
refreshDisplay() {
// 估算坐标→ GCJ-02
const r = wgs84ToGcj02(this.estLat, this.estLng)
const lat = r[0] as number
const lng = r[1] as number
this.centerLat = lat
this.centerLng = lng
// 标点
this.markers = [{
id: 1,
latitude: lat,
longitude: lng,
iconPath: '/static/marker.png',
width: 32, height: 32,
callout: { content: '手环估计位置', color: '#000', display: 'ALWAYS' }
}]
// 精度圈
this.circles = [{
latitude: lat,
longitude: lng,
radius: this.radius,
color: '#3b82f680',
fillColor: '#3b82f633',
strokeWidth: 2
}]
},
recenter() {
this.refreshDisplay()
},
openSysMap() {
uni.openLocation({ latitude: this.centerLat, longitude: this.centerLng, name: '手环估计位置' } as any)
}
}
}
</script>
<style scoped>
.page{ display:flex; flex-direction: column; height:100% }
.map{ flex:1; width:100% }
.toolbar{ padding: 12rpx; border-top:1px solid #eee; display:flex; gap: 12rpx }
</style>
```
要点UTS/uni-app-x 规范友好):
- 模板里用简单绑定;数据在 data/方法中预先整理,不在模板做复杂表达式。
- 属性名、数组结构与 `<map>` 组件文档保持一致;一次性替换 `markers/circles`,避免频繁小更新。
### 4) UTS 原生插件路线(当你需要更强能力)
- 场景:内置 `<map>` 组件受限、需要室内图/离线地图/大量覆盖物/轨迹回放/热力图等。
- 做法:在 uni 原生插件市场搜索“高德地图 UTS/定位 UTS”等选择维护活跃的插件。
- 优点:更强渲染与能力接入;缺点:体积与集成复杂度上升。
## 权限与清单Android
- manifest.json 中的 app-plus > android > permission 添加定位权限(通常 uni 默认已包含)。首次进入地图页或获取定位前提示授权。
- 若做后台定位或持续上传,需额外前台服务与权限申明,注意隐私合规。
## UI/交互建议
- 展示“估计位置 + 不确定性半径”:用户能感知误差而非认为是“点精确到米”。
- 支持点击 marker 查看当前 RSSI 源信息(网关数、残差、权重),便于排障。
- 多手环时,颜色/形状区分,支持列表一键定位到某个手环。
- 大量点位更新做节流(例如 500ms 合并一次),减少渲染压力。
---
如果你愿意,我可以在仓库里加一个 `pages/location` 示例页(含坐标转换工具 `utils/coord.uts`),并把你 RSSI 解算出的结果WGS84接上这个页面页面内展示 marker 与精度圈,同时提供“一键外部地图打开”。你只需要告诉我:页面路由名、是否用内置 `<map>` 还是先用 `openLocation` 路线即可。
Similar code found with 2 license types

222
doc_chat/体育课训练.md Normal file
View File

@@ -0,0 +1,222 @@
# 体育课训练(班级统一运动模式)设计与执行方案
本文描述体育课训练场景中教师通过界面为“本班级所有学生”统一开启运动模式并在课堂期间实时查看每位学生手环上传的生物数据与状态的完整方案UI + MQTT/后端 + 异常处理)。
## 目标与范围
- 教师在上课前一键开启“运动模式Training Mode下发至本班全部手环。
- 确认所有手环已进入运动模式(有应答/状态回报),对未响应的设备进行重试或标注异常。
- 上课期间,教师端实时查看本班所有学生的关键指标(心率、步数、血氧、体温、电量、在线状态等),并支持异常提醒。
- 下课时,教师一键结束“运动模式”,保存训练记录与统计。
## 角色与前置条件
- 角色教师Teacher
- 前置条件:
- 学生-手环已完成绑定关系;
- 教师端具备班级管理权限(可获取班级学生清单);
- MQTT 网关已接入并可对设备下发命令与接收上报;
- 教师端可以通过 WebSocket 或 MQTT 查看数据(建议 WebSocket 由网关聚合推送)。
## 教师端 UI 方案(页面:体育课训练)
页面入口建议:`/pages/sport/class-training/index`(名称可在 `pages.json` 注册)。
布局:
- 顶部:班级选择(默认当前班级)+ 当前训练状态(未开始/进行中/已结束)+ 开始/结束按钮。
- 中部:学生网格/列表卡片:头像/姓名/学号/设备号/在线状态/电量/实时心率/步数/告警标记。
- 侧栏/下拉:未响应设备清单与操作(重试、标记缺席/设备故障)。
- 流程提示条:显示“已应答/未应答”的汇总进度条。
因为要显示很多学生的信息（一般训练时一个班级大概是 50–60 人左右），所以把每个学生的信息包装成小的 card 类型 component；每个 card 里展示的信息以颜色阈值区分，超过警示阈值的卡片要排到最前面；阈值应支持设置多个且可调。
交互流程:
1) 开始训练
  - 教师点击“开始训练”→ 弹出确认对话框（本班 X 人，预计时长，是否立即开始？）
- 确认后:发起“班级训练会话”并向本班全部设备下发“进入运动模式”命令;
  - 页面进入“等待应答”态，显示进度（已应答/未应答）；60 秒未应答弹出“重试/忽略/标记异常”选项。
2) 训练进行中
- 学生卡片实时显示指标:
- 在线/离线(离线>30s标红/提醒)
- 心率(区间/颜色阈值)
- 步数(累计)
- 血氧/体温(可选)
- 电量(低电<20%提醒)
- 支持筛选:仅看异常、仅看未在线、仅看低电、仅看某个阈值以上心率。
- 支持点开某个学生查看最近1-5分钟趋势小图sparklines
3) 结束训练
- 教师点击“结束训练”→ 确认后向本班设备下发“退出运动模式”命令;
- 聚合保存本次训练会话(时长、参与人数、异常数、平均/峰值心率等),并生成简要报表。
## 执行方案MQTT 主题/消息 + 网关)
为保证简单可靠采用“教师端→网关→MQTT”的下发通道与“设备→MQTT→网关→教师端”的上报通道
主题约定(示例):
- 设备上行(遥测/状态)
- `iot/devices/{device_id}/telemetry`QoS0/1心率、步数、血氧、体温、电量等
- `iot/devices/{device_id}/status`QoS1保留在线/离线、固件版本、运动模式开关
- `iot/devices/{device_id}/ack`QoS1对命令的应答
- 设备下行(命令)
- `iot/devices/{device_id}/cmd`QoS1单设备命令
- 网关聚合通知(推给教师端 WebSocket 或 MQTT 客户端)
- `iot/classes/{class_id}/stream`WebSocket通道ID或服务端推送频道供教师端消费
命令与应答JSON 负载格式建议):
```json
// 下行:进入运动模式
{
"type": "training.start",
"tenant_id": "t-001",
"class_id": "c-0901",
"training_id": "trn_20250925_001",
"issued_at": 1695619200000,
"expire_secs": 60,
"sampling_interval_secs": 5,
"metrics": ["hr","steps","spo2","temp","battery"]
}
```
```json
// 上行应答:
{
"type": "ack",
"cmd": "training.start",
"training_id": "trn_20250925_001",
"device_id": "dev_abc123",
"student_id": "stu_10001",
"ok": true,
"at": 1695619203500,
"fw": "1.4.2",
"note": "ready"
}
```
```json
// 遥测:
{
"type": "telemetry",
"training_id": "trn_20250925_001",
"device_id": "dev_abc123",
"student_id": "stu_10001",
"ts": 1695619210000,
"hr": 112,
"steps": 350,
"spo2": 98,
"temp": 36.5,
"battery": 72
}
```
会话与关联:
- 启动时由网关生成 `training_id` 并持久化(班级、发起教师、开始时间、目标人数)。
- 每条应答/遥测均携带 `training_id` 用于快速聚合与展示。
- 设备也可通过 `status` 保留消息标注 `training_mode: true/false`,便于后续“断线重连”后立即恢复状态。
QoS/保留/超时建议:
- 命令QoS1可选保留=false应答QoS1遥测QoS0/1按带宽权衡
- 保留消息:`status` 使用保留,保证新订阅可立即获知设备当前模式与在线状态。
- 超时:下发后 60s 内未应答则判定“未响应”提供一键重试最多2-3次
安全与权限:
- 教师端不直接向 MQTT 发命令,统一走网关(鉴权、配额、风控)。
- 网关校验教师对 `class_id` 的管理权限,并仅向该班学生设备(或绑定设备)下发命令。
## 网关server/gateway-mqtt-node对接
建议在网关提供两类接口:
1) REST 操作(供教师端触发)
- `POST /api/classes/{class_id}/training/start`
- 入参:`sampling_interval_secs``metrics`(可选)、`expected_count`
- 出参:`training_id``sent_count`
- `POST /api/classes/{class_id}/training/stop`
- 入参:`training_id`
- 出参:`stopped: true`
2) WebSocket 推送(供教师端订阅显示)
- 频道:`/ws/classes/{class_id}/training/{training_id}`
- 消息:三类事件
- `ack`(应答进度)
- `telemetry`(实时数据)
- `summary`(阶段统计/结束汇总)
> 教师端也可直接使用 MQTT 订阅聚合主题,但 WebSocket 方案对移动端/小程序网络环境更友好。
## 数据存储与报表(可选增强)
- 表:`training_sessions`(会话元数据)、`training_participants`(参与学生/设备)、`training_metrics_live`(近实时缓存,用于课堂展示)、`training_metrics_hist`(历史明细/聚合,课后报表)。
- 课后可生成统计:参与率、平均/峰值心率、总步数、异常次数(离线、低电、心率异常区间)。
### Supabase 实时事件流表设计
- 表:`public.training_stream_events`
- 主键 `id` (`uuid`),默认 `gen_random_uuid()`
- 必填 `training_id` (`uuid`) 对应课堂训练会话;后续 `training_sessions` 表建好后可添加外键。
- `event_type` (`varchar(32)`),值域:`ack` / `telemetry` / `metrics` / `state` / `summary` 等。
- `class_id` (`uuid`,非空) 引用 `ak_classes`RLS 主要按照班级授权。
- `student_id` (`uuid`,可空) 引用 `ak_users`,若为空表示班级级别事件。
- `device_id` (`uuid`,可空) 引用 `ak_devices`,为空表示未绑定手环。
- `status` (`varchar(64)`)、`ack` (`bool`)、`metrics` (`jsonb`)、`payload` (`jsonb`) 存储结构化指标和原始载荷。
- `ingest_source` (`varchar(32)`,默认 `gateway`)、`ingest_note` (`text`) 记录事件来源或模拟批次说明。
- `recorded_at` / `ingested_at` (`timestamptz`) 记录设备事件发生时间与落库时间。
- 索引:分别在 `(training_id, recorded_at desc)``(class_id, recorded_at desc)``(student_id, recorded_at desc)``(device_id, recorded_at desc)``event_type` 上建立,方便实时排序和过滤。
- Realtime脚本自动将表加入 `supabase_realtime` publication。
- 视图:`public.training_stream_latest_metrics` 使用 `DISTINCT ON` 获取每名学生最新的 telemetry/metrics 事件,页面可直接订阅该视图聚合结果。
- 行级安全RLS
- `service_role full access`Edge Function / Gateway 可以全量读写(`auth.role() = 'service_role'`)。
- `teacher read class training events`:教师凭 `ak_teacher_roles``current_user_has_permission` 读取自己负责班级的事件。
- `student read own training events`:学生仅能读取 `student_id` 等于自身的记录。
- `teacher insert training events`:允许具备班级权限的教师在课堂模拟工具里写入事件,或由服务角色写入。
- `service role delete training events`:仅服务角色可执行删除用于清理。
- 脚本位置:`create_training_stream_events.sql`
## 异常与边界处理
- 设备离线:`status` 超过 30s 未更新 → 标记“离线”,教师端置顶并提示。
- 未应答:首次下发后 60s 未应答 → 可重试连续2次失败→ 标记为“未上线/设备异常/未佩戴”。
- 低电:`battery < 20` 高亮提示;`<10` 触发告警。
- 断点续传:设备重连后,读取 `status` 保留消息恢复 `training_mode`;教师端无需手动干预。
- 结束异常:若下课 stop 未成功(网关/设备异常),设备端可设定超时自动退出训练模式(例如 3 小时)。
## 教师端页面验收清单
- [ ] 可选择班级并显示班级学生清单与绑定设备号
- [ ] 点击“开始训练”后,出现应答进度条与“未响应列表”
- [ ] 训练中:实时刷新所有学生关键指标,异常醒目
- [ ] 可筛选/排序(异常优先)
- [ ] “结束训练”后生成本次训练的小结
- [ ] 断线/重连后页面自动恢复到当前训练会话
## 附:命令/应答/遥测数据示例
```json
// 结束训练命令
{
"type": "training.stop",
"tenant_id": "t-001",
"class_id": "c-0901",
"training_id": "trn_20250925_001",
"issued_at": 1695619800000
}
```
```json
// 设备状态(保留)
{
"type": "status",
"device_id": "dev_abc123",
"student_id": "stu_10001",
"online": true,
"training_mode": true,
"fw": "1.4.2",
"battery": 72,
"ts": 1695619210000
}
```
---
如需,我可以:
- 创建 `pages/sport/class-training/index.uvue` 的页面骨架UTS 友好)、注册路由、接上网关 WebSocket
-`server/gateway-mqtt-node` 内补充上述 REST 与 WS 接口示例,并提供一个内置 MQTT 模拟器用于联调。

301
doc_chat/微通信.md Normal file
View File

@@ -0,0 +1,301 @@
# 微通信架构与端到端流程设计MQTT/Kafka/Supabase/ClickHouse + 个推)
本方案围绕当前既有逻辑“设备/客户端通过 MQTT 上报 →Kafka 或)直接写入 Supabase → 前端 chat/index 接收 → 前端发送消息至 MQTT 并写入 Supabase”进行系统化设计覆盖后端组件、数据流、推送、可靠性与前端实现要点。
## 1. 总览
- 目标
- 低延迟、多终端的聊天与事件通知,既支持 App/Web也支持物联设备/边缘端。
- 统一消息落库PostgreSQL on Supabase+ 实时分发Supabase Realtime / MQTT
- 推送采用“个推”在 App 后台或离线时补充通知到达率。
- 核心组件
- MQTT BrokerEMQX/Mosquitto收消息/发消息的设备级/轻端入口QoS≥1。
- 网关服务 gateway-mqtt可选内置 Kafka Bridge
- 认证/ACL、协议转换、消息校验与标准化。
- 转发到 Kafkachat.inbound或直接调用 SupabaseREST/RPC/pg落库。
- Kafka可选/增强):
- 主题chat.inbound入口、chat.persisted落库成功后、chat.push通知任务
- 解耦高峰、可回放、做扩展处理(统计、风控、审计)。
- SupabasePostgres + Realtime + Edge Functions
-chat_conversations / chat_participants / chat_messages / chat_notifications已建表`doc_chat/create_chat_tables.sql`)。
- 触发器chat_on_message_insert更新 last_message_at + 写通知)。
- RLS按参与者隔离访问已配置
- Realtime前端订阅消息/通知的实时流。
- ClickHouse列存/OLAP/时序聚合):
- 承载健康/传感类高频时序数据(大跨度查询、长保留、低成本)。
- 与 Kafka/Telegraf 对接,建立明细表 + 物化视图做分钟/小时聚合。
- 网关运行报表Supabase 内):
- 表:`gateway_nodes``gateway_heartbeats`(见 `doc_chat/create_gateway_reporting.sql`)。
- Node 网关周期写入心跳与计数,供看板与告警使用。
- push-notify-worker个推
- 通过 Supabase Webhook/Edge Function 或 Kafka chat.push 触发,调用个推 API 下发推送。
## 2. 事件/消息模型
采用统一 envelope参考 CloudEvents 思想):
```json
{
"id": "<uuid>", // 客户端生成/网关补齐(用于幂等)
"ts": "2025-09-23T10:00:00Z", // ISO 时间
"type": "chat.message", // 事件类型
"source": "mqtt|web|server", // 来源
"conversation_id": "...",
"sender_id": "...",
"content": "...", // 文本或 URL音频/文件)
"content_type": "text|audio|image|file|markdown|json",
"metadata": {"duration_ms": 1234, "mime": "audio/mpeg", "size": 102400}
}
```
- 客户端发送到 MQTT 主题时携带此结构;
- 网关校验/补齐后落库为 chat_messages 的一行content/metadata 等字段对应)。
另:健康/传感时序Telemetry建议采用“轻 envelope + 列存模型”的二段式:
```json
{
"id": "<uuid>",
"ts": "2025-09-23T10:00:00Z",
"type": "ts.metric",
"user_id": "...",
"device_id": "...",
"metric": "hr|spo2|temp|bp_systolic|bp_diastolic|steps",
"value": 72.0,
"meta": { "unit": "bpm", "src": "ble" }
}
```
由网关投递至 Kafka `ts.health`,并落地 ClickHouse `health_raw`(示例见 `doc_chat/kafka_mqtt_clickhouse.md`)。
## 3. 主题与分区规划MQTT/Kafka
- MQTT 主题
- chat/send/{conversationId}:客户端→服务端,发送消息入口。
- chat/recv/{conversationId}:服务端→客户端,转发已确认写库的消息(可选)。
- presence/{userId}:在线状态(使用 Last Will 设置离线告警)。
- ack/{messageId}/{userId}(可选):消息已达/已读 ACK 路由。
- 建议 QoS1至少一次禁用 retain聊天消息不保留presence 可保留。
- ACL
- 仅参与者可发布 chat/send/{conversationId}
- 仅参与者可订阅 chat/recv/{conversationId}
- 用户仅能发布/订阅自身 presence/{userId} 与 ack 自己路由。
- Kafka 主题(可选)
- chat.inboundkey=conversationId保证同会话有序
- chat.persisted写库成功后的广播用于下游计算或转发
- chat.push推送任务。
- ts.healthkey=user_id 或 device_id健康/传感数据入口,供 ClickHouse 消费。
## 4. 后端流程设计
```text
[Client/Device]
| MQTT publish chat/send/{conversationId}
v
[gateway-mqtt]
| 校验/标准化/幂等
|--(A) 直写 Supabase PostgREST/Edge Function --> [Postgres]
|--(B) 投递 Kafka chat.inbound ---------------> [persist-worker -> Postgres]
|(trigger) -> chat_notifications
|(optional) -> MQTT chat/recv/{conversationId}
| -> Kafka chat.push
v
[push-notify-worker -> 个推]
---- Telemetry 时序流 ----
|--(T1) 网关将时序上报写入 Kafka ts.health ------> [ClickHouse 明细表 health_raw]
|--(T2) 物化/降采样视图做分钟/小时聚合 ----------> [Grafana 可视化]
|--(T3) 最近快照/告警回写 Supabase可选 ------> [user_metrics_last / alerts]
```
### 4.1 MQTT → Supabase入口
1) 客户端/设备 发布到 chat/send/{conversationId}QoS1
2) gateway-mqtt 收到消息:
- 校验 JWT/设备证书,鉴权是否会话参与者;
- 解析 envelope若无 id 则补 uuid
- 幂等检查可选Redis setNx(messageId) 窗口 24h
3) 落库:
- 方案 A直写 Supabase PostgRESTchat_messages.insert或 RPC。
- 方案 B写入 Kafka chat.inbound → persist-worker 落库(高峰更稳)。
4) Postgres 触发器 chat_on_message_insert
- 更新 chat_conversations.last_message_at
- 插入 chat_notifications除发送者外所有参与者
5) 可选:服务端再发布到 MQTT chat/recv/{conversationId}(供仅连 MQTT 的轻端即时收到),或依赖 Supabase Realtime 给连 H5/App 的用户推送。
### 4.2 Supabase → 个推(通知)
- 触发方式:
- Edge Function 订阅 Postgres Changes或 Supabase Webhook监听 chat_notifications insert
- 或 Kafka chat.push 消费者。
- push-notify-worker
- 查询 userId 的个推 cid
- 生成通知文案(可带会话标题、消息摘要、角标);
- 调用个推 API 下发;记录投递结果(可回写到一个 delivery_logs 表)。
### 4.3 前端发送chat/index
1) 音频/附件:先 S3 预签名上传,获得 URL 与 metadata。
2) 生成消息 iduuid组装 envelope。
3) 并行/串行两路:
- 发布到 MQTT chat/send/{conversationId}(低延迟通知网关/其他 MQTT 客户端)。
- 调用 Supabase 插入 chat_messages确保落库
4) UI 乐观更新;若落库失败则回滚/重试。
### 4.4 前端接收
- H5/App连 Supabase Realtime
- 订阅 chat_messagesINSERT与 chat_notificationsINSERT
- 房间内做去重(根据 id已在 room.uvue 实现)。
- 仅 MQTT 客户端:
- 订阅 chat/recv/{conversationId} 主题获取已确认消息(可由网关回发)。
### 4.5 Telemetry → ClickHouse健康/传感数据)
1) 设备/网关 MQTT 上报(可以复用同一 broker不同主题前缀如 `ts/send/{deviceId}`)。
2) gateway-mqtt 将清洗后的数据写入 Kafka `ts.health`key 使用 user_id 或 device_id 保证局部有序)。
3) ClickHouse 落地:明细表 `health_raw`MergeTree按月份分区TTL 保留),并建立 1m/1h 物化视图。
4) 前端时序图Grafana 直连 ClickHouse应用内查询可通过后端代理统一鉴权参见 `kafka_mqtt_clickhouse.md`)。
5) 最近快照/告警回写 Supabase`user_metrics_last`/`alerts`),与业务数据/权限体系对齐。
## 5. 推送(个推)策略
- 触发:插入 chat_notifications 后对目标用户触发推送。
- 去重:个推透传携带 message_id/conversation_id客户端去重。
- 点击动作:深链到 pages/sport/chat/room?conversation_id=xxx。
- 频率限制:同会话短时多条合并为“你有 X 条新消息”。
- 前台/后台判定:前台不推或用本地通知;后台/离线才用个推。
## 6. 安全与权限
- MQTT 认证JWT携带 user_id、过期时间或 TLS 证书ACL 映射会话参与者。
- Supabase RLS已基于 chat_participants确保 Edge Function 走 service role 仅内部调用。
- S3 附件:仅用预签名上传;公有读或 CDN 鉴权按需配置。
- Idempotencymessage_id 唯一约束Postgres 可加唯一索引),网关 Redis 幂等窗。
- TelemetryClickHouse 作为原始明细库不直接暴露到前端;前端通过后端代理或预聚合接口访问,鉴权仍以 Supabase 用户为中心。
## 7. 可靠性与一致性
- 顺序:以 conversationId 为 key 的 Kafka 分区(或直接依赖单库顺序 + created_at 排序)。
- 重试:
- 前端 AkReq 已支持网络重试;
- 网关→Supabase 写库失败使用指数退避重试;
- push 失败重试并退避;
- 断线:
- MQTT QoS1 + Clean Session
- 前端 Realtime 自动重连(建议在 AkSupaRealtime 增加重连与订阅恢复)。
- 未读计数:
- 客户端进入会话更新 chat_participants.last_read_at
- 会话列表通过 last_message_at 与 last_read_at 计算角标(服务端或客户端)。
- 网关运行健康:
- Node 网关定期上报 `gateway_heartbeats`CPU/内存/连接状态/消息计数/错误等)。
- 提供 `gateway_status_latest` 视图与 `gateway_daily_stats` 物化视图做看板/告警。
## 8. 与现有表结构的映射
- chat_messages.content 存文本或 S3 URLcontent_type 包含 "audio"(已支持)。
- chat_on_message_insert 已生成 chat_notifications供 Realtime 与个推使用。
- 参与者列表通过 chat_participants 查询;权限与 RLS 已覆盖。
- 网关运行报表:
- `gateway_nodes`:注册网关(以 `mqtt_client_id` 作为唯一标识)。
- `gateway_heartbeats`:周期心跳与计数;`gateway_status_latest` 展示最近一次状态。
- SQL 见 `doc_chat/create_gateway_reporting.sql`
## 9. 前端实现要点(结合现有代码)
- 单 WS 连接多订阅:`ChatDataService.ensureRealtime()` 已实现。
- 房间消息去重:`room.uvue` 已按 id 去重。
- 录音与上传:`AudioUploadService` 预签名 + multipart返回 URL 后调用 `sendAudioMessage()`
- 发送路径:保持 MQTT 发布 + Supabase 插入的“双写”,若担心两者不一致,可切换为“仅插库 → 后端转发到 MQTT”。
- 订阅释放:路由切换时调用 `dispose()``closeRealtime()` 清理。
- Telemetry 展示:趋势/历史直接走 ClickHouseGrafana 或后端代理 API业务侧角标/卡片展示用 Supabase 快照表(低延迟)。
## 10. 运维与监控
- 网关与推送服务加 Prometheus 指标QPS、失败率、延时、重试数。
- Kafka Lag 监控(若使用 Kafka
- Postgres 指标与 Realtime 通道数监控。
- ClickHouse插入延迟、分区大小、查询耗时、物化视图滞后Grafana 有现成数据源。
- 个推调用成功率,退避与熔断策略。
- 统一 compose 部署(见 `doc_chat/docker-compose.yml``kafka``redis``clickhouse``grafana``mqtt` 一体化联调。
## 11. 最小可行落地顺序
1) 网关直写 Supabase跳过 Kafka完成 MQTT → DB → Realtime 闭环;
2) 接入个推:从 chat_notifications 插入触发;
3) 加上 MQTT 回发chat/recv供仅 MQTT 客户端;
4) 引入 Kafka 做解耦与扩展(如需要);
5) 增强 Realtime 重连与订阅恢复;完善未读角标与已读回执。
6) Telemetry 引入 ClickHouse接入 Kafka `ts.health` → 明细表 + 物化视图;前端改走 ClickHouse/Grafana。
## 12. 配置与文件索引(本仓库)
- Docker 一体化:`doc_chat/docker-compose.yml`
- MQTT Broker 配置:`doc_chat/mosquitto/config/mosquitto.conf`
- 网关服务:
- 代码:`server/gateway-mqtt-node/src/index.js`MQTT→Kafka/Supabase、心跳上报
- Worker`server/gateway-mqtt-node/src/worker.js`Kafka chat.inbound → Supabase 持久化)
- 环境模板:`server/gateway-mqtt-node/.env.example`
- 聊天表结构与触发器:`doc_chat/create_chat_tables.sql`
- 网关运行报表:`doc_chat/create_gateway_reporting.sql`
- 时序选型与 ClickHouse 方案:`doc_chat/kafka_mqtt_clickhouse.md`
---
如需我生成网关示例Node.js/Go鉴权+插库)或 Supabase Edge Function 模板(监听 notifications → 个推),告诉我技术栈偏好即可。
---
## 附录:按会话划分 Supabase Realtime 通道方案
### 背景
- 现状:前端通过单一 `chat_messages` 订阅获取所有对话的 INSERT 事件,再由客户端根据 `conversation_id` 做过滤。
- 问题:
- 参加多个会话的用户会收到不相关会话的通知,虽然 UI 再过滤,但仍产生额外的网络和序列化开销。
- 如果后续需要对同一个 Supabase Realtime 连接做权限隔离(例如服务端使用 service key 代理订阅),需要显式限定查询范围。
### 设计目标
1. 每个会话使用独立的 Realtime channel通过 Postgres Changes 的 `filter``event` 参数仅推送该会话的数据。
2. 在保持单连接多订阅的基础上,动态创建/关闭频道,避免过多 WebSocket 连接。
3. 兼容移动端uni-app X与 Web同步维护订阅生命周期。
### 频道命名与过滤
| 场景 | Supabase channel 名称 | Postgres Changes 配置 |
|------|------------------------|------------------------|
| 会话消息 | `chat:msg:<conversationId>` | `{ event: "INSERT", schema: "public", table: "chat_messages", filter: "conversation_id=eq.<conversationId>" }` |
| 会话通知 | `chat:notify:<conversationId>`(可选) | `{ event: "INSERT", schema: "public", table: "chat_notifications", filter: "conversation_id=eq.<conversationId>" }` |
> 命名规则保留 `chat:` 前缀,方便统一管理;`conversationId` 建议做 URL encode 避免特殊字符。
### 前端改造步骤
1. **订阅管理器**:在 `ChatDataService.ensureRealtime()` 基础上,增加 `ensureConversationChannel(conversationId)`
- 若已有 `convChannels[conversationId]` 直接返回。
- 否则调用 `realtime.channel(name)`,并使用 `channel.on('postgres_changes', {...})` 设置监听。
- 调用 `channel.subscribe()`,成功后写入 Map失败时重试或回退为 table 级订阅。
2. **进入会话**`room.uvue` 调用 `ChatDataService.subscribeMessages(conversationId)` 时,改为:
- `ensureConversationChannel(conversationId)`
- 在 channel 回调里只派发该会话的消息(无需再过滤)。
3. **退出会话**:在 `onUnload/onHide` 调用 `ChatDataService.releaseConversationChannel(conversationId)`
- 取消监听,调用 `channel.unsubscribe()` 并从 Map 移除。
- 可设置闲置超时,避免频繁退出/进入导致频繁订阅。
4. **多会话同时打开**(例如侧边栏预览):允许同时保持多个频道订阅,根据 UI 激活状态决定是否真正渲染进入消息队列。
### 权限与安全
- 使用匿名/终端用户 token 连接 Realtime 时RLS 会自动限制仅返回参与者可访问的 `conversation_id`。即便如此,按会话过滤可以进一步降低服务端负载。
- 若使用 service key 统一代理订阅(例如后端服务转发消息),务必在 channel 过滤项中带上目标会话 ID避免泄露。
### 服务器端同步策略
- 若网关或中间层也监听 Realtime可采用同样的 channel 命名规范,或通过 `joinTopicIfNeeded('chat:msg:<id>', payload)` 方式统一管理。
- Channel 生命周期可和应用级订阅保持一致,并在服务器端维护引用计数,最后一个消费者离开时再真正 `unsubscribe()`
### 回退策略
- 如果遇到 Supabase 版本限制无法使用 `filter`,可以退回到表级订阅,但保留 channel 命名(仍使用 `chat:msg:<id>`),在客户端回调内做过滤。这样可以平滑升级。
- 同理,在网络不佳时可侦测订阅失败并自动切换到现有的“全量监听 + 客户端过滤”。
---
该方案不会改变已有聊天流程,只是优化 Realtime 的订阅粒度,实现“每个会话一个 channel”。配合现有的缓存/去重逻辑可直接落地,下一步可在 `ChatDataService` 中实现上述方法并编写单元测试覆盖多订阅场景。