/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*/

#ifndef __CONNECTION_H
#define __CONNECTION_H

#include <linux/list.h>
#include <linux/kfifo.h>
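
/*
 * Connection flags, passed via gb_connection_create_flags() and friends.
 * The notes below are an informal summary; connection.c and the host-device
 * drivers define the authoritative behaviour:
 *
 * CSD:		use controlled segment dropping instead of end-to-end flow
 *		control on the CPort (see gb_connection_e2efc_enabled())
 * NO_FLOWCTRL:	disable CPort flow-control features entirely
 * OFFLOADED:	CPort traffic is handled by the host device itself and
 *		bypasses the generic operations core
 * CDSI1:	use the host device's CDSI1 data interface (offloaded
 *		camera data)
 * HIGH_PRIO:	request elevated priority for this connection's traffic
 * CONTROL:	connection is an interface control connection
 * CORE_MASK:	flags reserved for greybus core use
 */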
#define GB_CONNECTION_FLAG_CSD		BIT(0)
#define GB_CONNECTION_FLAG_NO_FLOWCTRL	BIT(1)
#define GB_CONNECTION_FLAG_OFFLOADED	BIT(2)
#define GB_CONNECTION_FLAG_CDSI1	BIT(3)
#define GB_CONNECTION_FLAG_CONTROL	BIT(4)
#define GB_CONNECTION_FLAG_HIGH_PRIO	BIT(5)

#define GB_CONNECTION_FLAG_CORE_MASK	GB_CONNECTION_FLAG_CONTROL
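
/*
 * Connection state machine (rough summary; connection.c is authoritative):
 *
 * DISABLED       - no operations may be sent or received
 * ENABLED_TX     - only outgoing operations are allowed; incoming requests
 *                  are rejected
 * ENABLED        - fully operational in both directions
 * DISCONNECTING  - connection is being torn down; new incoming requests
 *                  are refused
 */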
enum gb_connection_state {
	GB_CONNECTION_STATE_DISABLED		= 0,
	GB_CONNECTION_STATE_ENABLED_TX		= 1,
	GB_CONNECTION_STATE_ENABLED		= 2,
	GB_CONNECTION_STATE_DISCONNECTING	= 3,
};

struct gb_operation;

typedef int (*gb_request_handler_t)(struct gb_operation *);
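
/*
 * A gb_connection represents a single CPort connection between the host
 * device (@hd, @hd_cport_id) and an interface CPort (@intf or @bundle,
 * @intf_cport_id).  Incoming requests are dispatched to @handler from the
 * per-connection workqueue @wq, in-flight operations are tracked on
 * @operations, and @private carries opaque driver data (see
 * gb_connection_get_data()/gb_connection_set_data() below).
 */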
struct gb_connection {
	struct gb_host_device		*hd;
	struct gb_interface		*intf;
	struct gb_bundle		*bundle;
	struct kref			kref;
	u16				hd_cport_id;
	u16				intf_cport_id;

	struct list_head		hd_links;
	struct list_head		bundle_links;

	gb_request_handler_t		handler;
	unsigned long			flags;

	struct mutex			mutex;
	spinlock_t			lock;
	enum gb_connection_state	state;
	struct list_head		operations;

	char				name[16];
	struct workqueue_struct		*wq;

	atomic_t			op_cycle;

	void				*private;

	bool				mode_switch;
};
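
/*
 * Connection constructors.  Static connections (e.g. the SVC connection)
 * have no interface, control connections use an interface's control CPort,
 * and offloaded connections have their CPort traffic handled by the host
 * device rather than the operations core.  The creators return a valid
 * pointer or an ERR_PTR() value, so check the result with IS_ERR().
 */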
struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
				u16 hd_cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
				u16 cport_id, gb_request_handler_t handler);
struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
				u16 cport_id, gb_request_handler_t handler,
				unsigned long flags);
struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
				u16 cport_id, unsigned long flags);
void gb_connection_destroy(struct gb_connection *connection);

static inline bool gb_connection_is_static(struct gb_connection *connection)
{
	return !connection->intf;
}
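
/*
 * Enable/disable helpers (informal summary; see connection.c for details):
 * gb_connection_enable() allows traffic in both directions, while
 * gb_connection_enable_tx() allows only outgoing operations.
 * gb_connection_disable_rx() stops processing of incoming requests,
 * gb_connection_disable() shuts the connection down cleanly, and
 * gb_connection_disable_forced() skips the normal disconnect handshake
 * when the remote end is already gone.
 */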
int gb_connection_enable(struct gb_connection *connection);
int gb_connection_enable_tx(struct gb_connection *connection);
void gb_connection_disable_rx(struct gb_connection *connection);
void gb_connection_disable(struct gb_connection *connection);
void gb_connection_disable_forced(struct gb_connection *connection);

void gb_connection_mode_switch_prepare(struct gb_connection *connection);
void gb_connection_mode_switch_complete(struct gb_connection *connection);
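
/*
 * greybus_data_rcvd() is called by host-device drivers to hand data received
 * on a CPort over to the greybus core; it may be called in interrupt context.
 */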
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length);

void gb_connection_latency_tag_enable(struct gb_connection *connection);
void gb_connection_latency_tag_disable(struct gb_connection *connection);
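
/*
 * Typical bundle-driver usage (illustrative sketch only; my_device,
 * cport_desc and gb_my_request_handler below are hypothetical and not part
 * of this API):
 *
 *	static int gb_my_request_handler(struct gb_operation *op)
 *	{
 *		struct my_device *my = gb_connection_get_data(op->connection);
 *
 *		// decode op->request->payload, optionally allocate and fill
 *		// a response, then return 0 or a negative errno
 *		return 0;
 *	}
 *
 *	// probe:
 *	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
 *					  gb_my_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *	gb_connection_set_data(connection, my);
 *	ret = gb_connection_enable(connection);
 *	if (ret)
 *		goto err_connection_destroy;
 *
 *	// remove:
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */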

static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
{
	return !(connection->flags & GB_CONNECTION_FLAG_CSD);
}

static inline bool
gb_connection_flow_control_disabled(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
}

static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
}

static inline bool gb_connection_is_control(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_CONTROL;
}

static inline void *gb_connection_get_data(struct gb_connection *connection)
{
	return connection->private;
}

static inline void gb_connection_set_data(struct gb_connection *connection,
					  void *data)
{
	connection->private = data;
}

#endif /* __CONNECTION_H */