/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MSM_CLOCK_GENERIC_H
#define __MSM_CLOCK_GENERIC_H
#include <linux/clk/msm-clk-provider.h>
#include <linux/of.h>
/**
* struct fixed_clk - fixed rate clock
* @c: clk
*/
struct fixed_clk {
struct clk c;
};
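/*
* Illustrative usage sketch (not part of the original header): a board XO
* clock defined as a fixed_clk. "xo_clk_src" is hypothetical, and
* "clk_ops_dummy" stands in for whatever fixed-rate clk_ops the platform's
* clock provider actually supplies.
*
*	static struct fixed_clk xo_clk_src = {
*		.c = {
*			.rate = 19200000,
*			.dbg_name = "xo_clk_src",
*			.ops = &clk_ops_dummy,
*			CLK_INIT(xo_clk_src.c),
*		},
*	};
*/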
/* ==================== Mux clock ==================== */
struct mux_clk;
struct clk_mux_ops {
int (*set_mux_sel)(struct mux_clk *clk, int sel);
int (*get_mux_sel)(struct mux_clk *clk);
/* Optional */
bool (*is_enabled)(struct mux_clk *clk);
int (*enable)(struct mux_clk *clk);
void (*disable)(struct mux_clk *clk);
void __iomem *(*list_registers)(struct mux_clk *clk, int n,
struct clk_register_data **regs, u32 *size);
};
#define MUX_SRC_LIST(...) \
.parents = (struct clk_src[]){__VA_ARGS__}, \
.num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
#define MUX_REC_SRC_LIST(...) \
.rec_parents = (struct clk * []){__VA_ARGS__}, \
.num_rec_parents = ARRAY_SIZE(((struct clk * []){__VA_ARGS__}))
struct mux_clk {
/* Parents in decreasing order of preference for obtaining rates. */
struct clk_src *parents;
int num_parents;
/* Recursively search for the requested parent in rec_parents. */
struct clk **rec_parents;
int num_rec_parents;
struct clk *safe_parent;
int safe_sel;
unsigned long safe_freq;
/*
* Before attempting a clk_round_rate on available sources, attempt a
* clk_get_rate on all those sources. If one of them is already at the
* necessary rate, that source will be used.
*/
bool try_get_rate;
struct clk_mux_ops *ops;
/*
* Set if you need the mux to try a new parent before falling back to
* the current parent. If the safe_parent field above is set, then the
* safe_sel intermediate source will only be used if we fall back to
* the current parent during mux_set_rate.
*/
bool try_new_parent;
/* Fields not used by helper function. */
void *const __iomem *base;
u32 offset;
u32 en_offset;
u32 mask;
u32 shift;
u32 en_mask;
/*
* Set post divider for debug mux in order to divide the clock
* by post_div + 1.
*/
u32 post_div;
int low_power_sel;
void *priv;
struct clk c;
};
static inline struct mux_clk *to_mux_clk(struct clk *c)
{
return container_of(c, struct mux_clk, c);
}
extern struct clk_ops clk_ops_gen_mux;
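/*
* Illustrative usage sketch (not part of the original header): a two-input
* mux driven by clk_ops_gen_mux. "pll0", "pll1" and "example_mux_ops" are
* hypothetical platform definitions; the register offset is arbitrary.
*
*	static struct mux_clk example_mux = {
*		.ops = &example_mux_ops,
*		.offset = 0x1000,
*		.mask = 0x3,
*		.shift = 0,
*		MUX_SRC_LIST(
*			{ &pll0.c, 0 },
*			{ &pll1.c, 1 },
*		),
*		.c = {
*			.dbg_name = "example_mux",
*			.ops = &clk_ops_gen_mux,
*			CLK_INIT(example_mux.c),
*		},
*	};
*
* clk_ops_gen_mux calls back into the attached clk_mux_ops (here
* example_mux_ops) to program the select field.
*/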
/* ==================== Divider clock ==================== */
struct div_clk;
struct clk_div_ops {
int (*set_div)(struct div_clk *clk, int div);
int (*get_div)(struct div_clk *clk);
bool (*is_enabled)(struct div_clk *clk);
int (*enable)(struct div_clk *clk);
void (*disable)(struct div_clk *clk);
void __iomem *(*list_registers)(struct div_clk *clk, int n,
struct clk_register_data **regs, u32 *size);
};
struct div_data {
unsigned int div;
unsigned int min_div;
unsigned int max_div;
unsigned long rate_margin;
/*
* Indicates whether this divider clock supports half-integer dividers.
* If it does, min_div and max_div are doubled, i.e. they are 2*N.
*/
bool is_half_divider;
/*
* Skip odd dividers since the hardware may not support them.
*/
bool skip_odd_div;
bool skip_even_div;
bool allow_div_one;
unsigned int cached_div;
};
struct div_clk {
struct div_data data;
/*
* Some implementations may require the divider to be set to a "safe"
* value that allows reprogramming of upstream clocks without violating
* voltage constraints.
*/
unsigned long safe_freq;
/* Optional */
struct clk_div_ops *ops;
/* Fields not used by helper function. */
void *const __iomem *base;
u32 offset;
u32 mask;
u32 shift;
u32 en_mask;
void *priv;
struct clk c;
};
static inline struct div_clk *to_div_clk(struct clk *c)
{
return container_of(c, struct div_clk, c);
}
extern struct clk_ops clk_ops_div;
extern struct clk_ops clk_ops_slave_div;
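/*
* Illustrative usage sketch (not part of the original header): a programmable
* divider fed by the mux above. "example_mux" and "example_div_ops" are
* hypothetical platform definitions; the register offset is arbitrary.
*
*	static struct div_clk example_div = {
*		.data = {
*			.min_div = 1,
*			.max_div = 4,
*			.div = 2,
*		},
*		.ops = &example_div_ops,
*		.offset = 0x1004,
*		.mask = 0x3,
*		.shift = 0,
*		.c = {
*			.parent = &example_mux.c,
*			.dbg_name = "example_div",
*			.ops = &clk_ops_div,
*			CLK_INIT(example_div.c),
*		},
*	};
*/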
struct ext_clk {
struct clk c;
struct device *dev;
char *clk_id;
};
long parent_round_rate(struct clk *c, unsigned long rate);
unsigned long parent_get_rate(struct clk *c);
int parent_set_rate(struct clk *c, unsigned long rate);
static inline struct ext_clk *to_ext_clk(struct clk *c)
{
return container_of(c, struct ext_clk, c);
}
extern struct clk_ops clk_ops_ext;
#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
static struct div_clk clk_name = { \
.data = { \
.max_div = _div, \
.min_div = _div, \
.div = _div, \
}, \
.c = { \
.parent = _parent, \
.dbg_name = #clk_name, \
.ops = &clk_ops_div, \
CLK_INIT(clk_name.c), \
} \
}
#define DEFINE_FIXED_SLAVE_DIV_CLK(clk_name, _div, _parent) \
static struct div_clk clk_name = { \
.data = { \
.max_div = _div, \
.min_div = _div, \
.div = _div, \
}, \
.c = { \
.parent = _parent, \
.dbg_name = #clk_name, \
.ops = &clk_ops_slave_div, \
CLK_INIT(clk_name.c), \
} \
}
#define DEFINE_EXT_CLK(clk_name, _parent) \
static struct ext_clk clk_name = { \
.c = { \
.parent = _parent, \
.dbg_name = #clk_name, \
.ops = &clk_ops_ext, \
CLK_INIT(clk_name.c), \
} \
}
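/*
* Illustrative macro usage (not part of the original header): "pll0" is a
* hypothetical clock defined elsewhere.
*
*	DEFINE_FIXED_DIV_CLK(pll0_div2_clk, 2, &pll0.c);
*	DEFINE_EXT_CLK(ext_xo_clk, NULL);
*/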
/* ==================== Mux Div clock ==================== */
struct mux_div_clk;
/*
* struct mux_div_ops
* the enable and disable ops are optional.
*/
struct mux_div_ops {
int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
int (*enable)(struct mux_div_clk *);
void (*disable)(struct mux_div_clk *);
bool (*is_enabled)(struct mux_div_clk *);
void __iomem *(*list_registers)(struct mux_div_clk *md, int n,
struct clk_register_data **regs, u32 *size);
};
/*
* struct mux_div_clk - combined mux/divider clock
* @priv
*	parameters needed by ops
* @safe_freq
*	when switching rates from A to B, the mux_div clock will
*	instead switch from A -> safe_freq -> B. This allows the
*	mux_div clock to change rates while enabled, even if this
*	behavior is not supported by the parent clocks.
*	If changing the rate of parent A also causes the rate of
*	parent B to change, then safe_freq must be defined.
*	safe_freq is expected to have a source clock which is always
*	on and runs at only one rate.
* @parents
*	list of parents and mux indices
* @ops
*	function pointers for hw specific operations
* @src_sel
*	the mux index which will be used if the clock is enabled.
* @try_get_rate
*	Set if the mux should jump directly to a source that is
*	already running at the desired rate.
* @force_enable_md
*	Set if the mux-div needs to be force enabled/disabled during
*	clk_enable/disable.
*/
struct mux_div_clk {
/* Required parameters */
struct mux_div_ops *ops;
struct div_data data;
struct clk_src *parents;
u32 num_parents;
struct clk c;
/* Internal */
u32 src_sel;
/* Optional parameters */
void *priv;
void __iomem *base;
u32 div_mask;
u32 div_offset;
u32 div_shift;
u32 src_mask;
u32 src_offset;
u32 src_shift;
u32 en_mask;
u32 en_offset;
u32 safe_div;
struct clk *safe_parent;
unsigned long safe_freq;
bool try_get_rate;
bool force_enable_md;
};
static inline struct mux_div_clk *to_mux_div_clk(struct clk *clk)
{
return container_of(clk, struct mux_div_clk, c);
}
extern struct clk_ops clk_ops_mux_div_clk;
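/*
* Illustrative usage sketch (not part of the original header): a combined
* mux/divider. "xo", "pll0" and "example_md_ops" are hypothetical platform
* definitions; the safe frequency shown is only an example value.
*
*	static struct mux_div_clk example_md = {
*		.ops = &example_md_ops,
*		.data = {
*			.min_div = 1,
*			.max_div = 16,
*		},
*		.safe_freq = 19200000,
*		MUX_SRC_LIST(
*			{ &xo.c, 0 },
*			{ &pll0.c, 1 },
*		),
*		.c = {
*			.dbg_name = "example_md",
*			.ops = &clk_ops_mux_div_clk,
*			CLK_INIT(example_md.c),
*		},
*	};
*/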
/* ==================== Virtual clock ==================== */
struct virtclk_front {
int id;
struct clk c;
u32 flag;
};
extern struct clk_ops virtclk_front_ops;
int msm_virtclk_front_probe(struct platform_device *pdev,
struct clk_lookup *table,
size_t size);
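/*
* Illustrative usage sketch (not part of the original header):
* "my_virt_clocks" is a hypothetical clk_lookup table defined by the
* platform driver that registers the virtual clock frontend.
*
*	static int my_virtclk_probe(struct platform_device *pdev)
*	{
*		return msm_virtclk_front_probe(pdev, my_virt_clocks,
*					       ARRAY_SIZE(my_virt_clocks));
*	}
*/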
#endif